| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82-54.1k chars) | int64 (0-699) | string (111-35.6k chars) | int64 (0-699) | int64 (0-1) |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1_024,
'gpt2-medium': 1_024,
'gpt2-large': 1_024,
'gpt2-xl': 1_024,
'distilgpt2': 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop('add_bos_token', False)

        # Sync the backend pre-tokenizer's add_prefix_space flag with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            'to use it with pretokenized inputs.'
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            'to use it with pretokenized inputs.'
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

(code_codestyle: 352)
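For context, a minimal usage sketch of the fast tokenizer above (not part of the original file; it assumes a standard `transformers` installation and network access to the `gpt2` checkpoint):

```python
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
ids = tok("Hello world")["input_ids"]
print(tok.decode(ids))  # round-trips to "Hello world"

# Pretokenized input requires add_prefix_space=True, per the assert in
# _batch_encode_plus/_encode_plus above.
tok_ws = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
print(tok_ws(["Hello", "world"], is_split_into_words=True)["input_ids"])
```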
def base16_encode(data: bytes) -> str:
    """Encode the given bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
(style_context_codestyle: 693, label: 0)
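A quick round-trip check of the two helpers above; the hex values can be verified by hand from the ASCII table (H = 0x48, e = 0x65, and so on):

```python
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"
# Odd-length or lowercase input raises ValueError, per the validity checks above.
```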
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
(code_codestyle: 347)
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of n consecutive integers
    with n unique prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
(style_context_codestyle: 693, label: 0)
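As a sanity check, the Project Euler 47 statement gives 644, 645, 646 as the first run of three consecutive integers with three distinct prime factors each, which the functions above reproduce:

```python
assert unique_prime_factors(644) == {2, 7, 23}  # 644 = 2^2 * 7 * 23
assert run(3) == [644, 645, 646]
assert solution(3) == 644
```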
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_a : Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Any ):
UpperCAmelCase = {}
with open(__snake_case , 'r' ) as file:
for line_number, line in enumerate(__snake_case ):
UpperCAmelCase = line.strip()
if line:
UpperCAmelCase = line.split()
UpperCAmelCase = line_number
UpperCAmelCase = words[0]
UpperCAmelCase = value
return result
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ):
for attribute in key.split('.' ):
UpperCAmelCase = getattr(__snake_case , __snake_case )
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__snake_case ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
UpperCAmelCase = getattr(__snake_case , __snake_case ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
UpperCAmelCase = getattr(__snake_case , __snake_case )
UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase = value[0]
else:
UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
UpperCAmelCase = getattr(__snake_case , __snake_case )
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ):
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__snake_case ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCAmelCase = 'param'
if weight_type is not None and weight_type != "param":
UpperCAmelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = '.'.join([key, hf_param_name] )
else:
UpperCAmelCase = key
UpperCAmelCase = value if 'lm_head' in full_key else value[0]
_a : List[str] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : int=None ):
UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(__snake_case )[0].split('.' )[-2]
UpperCAmelCase = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase = 'weight_v'
elif "bias" in name:
UpperCAmelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase = 'weight'
else:
UpperCAmelCase = None
if hf_dict is not None:
rename_dict(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
else:
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
return is_used
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase = True
else:
UpperCAmelCase = load_wavaveca_layer(__snake_case , __snake_case , __snake_case )
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
UpperCAmelCase = full_name.split('conv_layers.' )[-1]
UpperCAmelCase = name.split('.' )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
if config_path is not None:
UpperCAmelCase = WavaVecaConfig.from_pretrained(__snake_case )
else:
UpperCAmelCase = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase = read_txt_into_dict(__snake_case )
UpperCAmelCase = idalabel
UpperCAmelCase = WavaVecaForSequenceClassification(__snake_case )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
feature_extractor.save_pretrained(__snake_case )
elif is_finetuned:
if dict_path:
UpperCAmelCase = Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase = target_dict.pad_index
UpperCAmelCase = target_dict.bos_index
UpperCAmelCase = target_dict.eos_index
UpperCAmelCase = len(target_dict.symbols )
UpperCAmelCase = os.path.join(__snake_case , 'vocab.json' )
if not os.path.isdir(__snake_case ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase = 0
UpperCAmelCase = 1
with open(__snake_case , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__snake_case , __snake_case )
UpperCAmelCase = WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__snake_case , )
UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
UpperCAmelCase = WavaVecaForCTC(__snake_case )
else:
UpperCAmelCase = WavaVecaForPreTraining(__snake_case )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCAmelCase = argparse.Namespace(task='audio_pretraining' )
UpperCAmelCase = fairseq.tasks.setup_task(__snake_case )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__snake_case )
UpperCAmelCase = model[0].eval()
recursively_load_weights(__snake_case , __snake_case , not is_finetuned )
hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_a : Tuple = parser.parse_args()
_a : Any = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
(code_codestyle: 447)
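For reference, a hypothetical programmatic call to the converter defined above, bypassing argparse (all paths are placeholders, not from the source, and a local fairseq pretraining checkpoint is assumed):

```python
convert_wavaveca_checkpoint(
    "/path/to/wav2vec_small.pt",  # fairseq checkpoint (placeholder)
    "./wav2vec2-base",            # output folder for the converted HF model (placeholder)
    None,                         # config_path: fall back to the default config
    None,                         # dict_path: no dictionary needed for a pretraining checkpoint
    False,                        # is_finetuned
    False,                        # is_seq_class
)
```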
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
(style_context_codestyle: 693, label: 0)
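Similarly, a hypothetical invocation of the VideoMAE converter above (the checkpoint URL is the script's own argparse default, truncated here; the dump folder is a placeholder):

```python
url = "https://drive.google.com/u/1/uc?id=..."  # full default URL appears in the argparse block above
convert_videomae_checkpoint(
    url,                # checkpoint_url
    "./videomae-base",  # pytorch_dump_folder_path (placeholder)
    "videomae-base",    # model_name, one of the names listed in the script
    False,              # push_to_hub
)
```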
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: 'DiagonalGaussianDistribution'


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
def __init__(self, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = 3, SCREAMING_SNAKE_CASE_ = ("DownEncoderBlock2D",), SCREAMING_SNAKE_CASE_ = ("UpDecoderBlock2D",), SCREAMING_SNAKE_CASE_ = (64,), SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = "silu", SCREAMING_SNAKE_CASE_ = 4, SCREAMING_SNAKE_CASE_ = 32, SCREAMING_SNAKE_CASE_ = 32, SCREAMING_SNAKE_CASE_ = 0.1_8_2_1_5, ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_: int = Encoder(
in_channels=_a, out_channels=_a, down_block_types=_a, block_out_channels=_a, layers_per_block=_a, act_fn=_a, norm_num_groups=_a, double_z=_a, )
# pass init params to Decoder
UpperCAmelCase_: Optional[int] = Decoder(
in_channels=_a, out_channels=_a, up_block_types=_a, block_out_channels=_a, layers_per_block=_a, norm_num_groups=_a, act_fn=_a, )
self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1 )
self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1 )
UpperCAmelCase_: int = False
UpperCAmelCase_: Optional[Any] = False
# only relevant if vae tiling is enabled
UpperCAmelCase_: List[str] = self.config.sample_size
UpperCAmelCase_: str = (
self.config.sample_size[0]
if isinstance(self.config.sample_size, (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_: List[str] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_: List[Any] = 0.2_5
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
if isinstance(_a, (Encoder, Decoder) ):
UpperCAmelCase_: str = value
def __snake_case (self, SCREAMING_SNAKE_CASE_ = True ) -> List[str]:
UpperCAmelCase_: Dict = use_tiling
def __snake_case (self ) -> List[str]:
self.enable_tiling(_a )
def __snake_case (self ) -> str:
UpperCAmelCase_: Dict = True
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Dict = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __snake_case (self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_: Union[str, Any] = {}
def fn_recursive_add_processors(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
if hasattr(_a, """set_processor""" ):
UpperCAmelCase_: Dict = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}', _a, _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a, _a, _a )
return processors
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Dict = len(self.attn_processors.keys() )
if isinstance(_a, _a ) and len(_a ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(_a )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
if hasattr(_a, """set_processor""" ):
if not isinstance(_a, _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}', _a, _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a, _a, _a )
def __snake_case (self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_a, return_dict=_a )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_: Any = [self.encoder(_a ) for x_slice in x.split(1 )]
UpperCAmelCase_: int = torch.cat(_a )
else:
UpperCAmelCase_: Optional[int] = self.encoder(_a )
UpperCAmelCase_: str = self.quant_conv(_a )
UpperCAmelCase_: Tuple = DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_a, return_dict=_a )
UpperCAmelCase_: Dict = self.post_quant_conv(_a )
UpperCAmelCase_: Tuple = self.decoder(_a )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
@apply_forward_hook
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_: int = [self._decode(_a ).sample for z_slice in z.split(1 )]
UpperCAmelCase_: List[Any] = torch.cat(_a )
else:
UpperCAmelCase_: Dict = self._decode(_a ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_a )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCAmelCase_: int = min(a.shape[2], b.shape[2], _a )
for y in range(_a ):
UpperCAmelCase_: Optional[Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: List[str] = min(a.shape[3], b.shape[3], _a )
for x in range(_a ):
UpperCAmelCase_: Optional[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True ) -> AutoencoderKLOutput:
UpperCAmelCase_: int = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_: Optional[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_: Optional[Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_: str = []
for i in range(0, x.shape[2], _a ):
UpperCAmelCase_: Tuple = []
for j in range(0, x.shape[3], _a ):
UpperCAmelCase_: Optional[int] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_: Any = self.encoder(_a )
UpperCAmelCase_: List[str] = self.quant_conv(_a )
row.append(_a )
rows.append(_a )
UpperCAmelCase_: Optional[Any] = []
for i, row in enumerate(_a ):
UpperCAmelCase_: Union[str, Any] = []
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_: Optional[Any] = self.blend_v(rows[i - 1][j], _a, _a )
if j > 0:
UpperCAmelCase_: str = self.blend_h(row[j - 1], _a, _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a, dim=3 ) )
UpperCAmelCase_: Dict = torch.cat(_a, dim=2 )
UpperCAmelCase_: Any = DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_: List[str] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_: Optional[int] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_: List[str] = []
for i in range(0, z.shape[2], _a ):
UpperCAmelCase_: List[Any] = []
for j in range(0, z.shape[3], _a ):
UpperCAmelCase_: Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_: Tuple = self.post_quant_conv(_a )
UpperCAmelCase_: Union[str, Any] = self.decoder(_a )
row.append(_a )
rows.append(_a )
UpperCAmelCase_: Dict = []
for i, row in enumerate(_a ):
UpperCAmelCase_: int = []
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_: List[str] = self.blend_v(rows[i - 1][j], _a, _a )
if j > 0:
UpperCAmelCase_: Optional[int] = self.blend_h(row[j - 1], _a, _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a, dim=3 ) )
UpperCAmelCase_: List[str] = torch.cat(_a, dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_: Dict = sample
UpperCAmelCase_: List[str] = self.encode(_a ).latent_dist
if sample_posterior:
UpperCAmelCase_: List[Any] = posterior.sample(generator=_a )
else:
UpperCAmelCase_: Dict = posterior.mode()
UpperCAmelCase_: List[str] = self.decode(_a ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
(code_codestyle: 556)
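To make the encode/decode path concrete, a minimal round-trip sketch, assuming the class corresponds to diffusers' public AutoencoderKL API (this mirror's method names are obfuscated, so the sketch targets the upstream class):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL()                    # defaults as in the __init__ signature above
x = torch.randn(1, 3, 64, 64)            # a fake RGB image batch
posterior = vae.encode(x).latent_dist    # DiagonalGaussianDistribution
z = posterior.sample()
recon = vae.decode(z).sample
print(recon.shape)                       # same spatial shape as the input
```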
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
(style_context_codestyle: 693, label: 0)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
__a : Union[str, Any] = CycleDiffusionPipeline
__a : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__a : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}
__a : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__a : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
__a : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : Dict = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
UpperCamelCase_ : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=10_00 , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase_ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCamelCase_ : Dict = CLIPTextModel(_a )
UpperCamelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase_ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
UpperCamelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
UpperCamelCase_ : int = image / 2 + 0.5
if str(_a ).startswith("""mps""" ):
UpperCamelCase_ : Optional[Any] = torch.manual_seed(_a )
else:
UpperCamelCase_ : List[str] = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ : List[Any] = self.get_dummy_components()
UpperCamelCase_ : List[Any] = CycleDiffusionPipeline(**_a )
UpperCamelCase_ : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase_ : str = self.get_dummy_inputs(_a )
UpperCamelCase_ : Optional[int] = pipe(**_a )
UpperCamelCase_ : Dict = output.images
UpperCamelCase_ : str = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
UpperCamelCase_ : int = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(_a , """half""" ):
UpperCamelCase_ : int = module.half()
UpperCamelCase_ : Union[str, Any] = CycleDiffusionPipeline(**_a )
UpperCamelCase_ : List[str] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase_ : Tuple = self.get_dummy_inputs(_a )
UpperCamelCase_ : List[str] = pipe(**_a )
UpperCamelCase_ : Any = output.images
UpperCamelCase_ : str = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
UpperCamelCase_ : str = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _UpperCAmelCase ( self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _UpperCAmelCase ( self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _UpperCAmelCase ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _UpperCAmelCase ( self ):
return super().test_save_load_optional_components()
@skip_mps
def _UpperCAmelCase ( self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
def _UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
UpperCamelCase_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
UpperCamelCase_ : int = init_image.resize((5_12, 5_12) )
UpperCamelCase_ : Tuple = """CompVis/stable-diffusion-v1-4"""
UpperCamelCase_ : Dict = DDIMScheduler.from_pretrained(_a , subfolder="""scheduler""" )
UpperCamelCase_ : List[Any] = CycleDiffusionPipeline.from_pretrained(
_a , scheduler=_a , safety_checker=_a , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
UpperCamelCase_ : int = """A black colored car"""
UpperCamelCase_ : Optional[int] = """A blue colored car"""
UpperCamelCase_ : Dict = torch.manual_seed(0 )
UpperCamelCase_ : Any = pipe(
prompt=_a , source_prompt=_a , image=_a , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_a , output_type="""np""" , )
UpperCamelCase_ : List[Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
UpperCamelCase_ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
UpperCamelCase_ : Optional[int] = init_image.resize((5_12, 5_12) )
UpperCamelCase_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
UpperCamelCase_ : List[str] = DDIMScheduler.from_pretrained(_a , subfolder="""scheduler""" )
UpperCamelCase_ : Tuple = CycleDiffusionPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
UpperCamelCase_ : str = """A black colored car"""
UpperCamelCase_ : List[Any] = """A blue colored car"""
UpperCamelCase_ : List[str] = torch.manual_seed(0 )
UpperCamelCase_ : List[str] = pipe(
prompt=_a , source_prompt=_a , image=_a , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_a , output_type="""np""" , )
UpperCamelCase_ : Any = output.images
assert np.abs(image - expected_image ).max() < 2E-2
(code_codestyle: 208)
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
(style_context_codestyle: 693, label: 0)
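The classic amicable pair (220, 284) makes a quick sanity check for sum_of_divisors, since each number's proper divisors sum to the other:

```python
assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_divisors(284) == 220  # 1+2+4+71+142
```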
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Sieve the first segment [2, sqrt(n)] directly.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve the remaining segments using the primes found so far.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside [low, high].
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
(code_codestyle: 92)
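A small sanity check for the segmented sieve above:

```python
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```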
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: N = (moles / volume) * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
(style_context_codestyle: 693, label: 0)
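A worked example with the ideal gas law PV = nRT, R = 0.0821 L*atm/(mol*K): 2 mol at 300 K in a 10 L vessel gives P = (2 * 0.0821 * 300) / 10 = 4.926 atm, which the helper rounds to 5.

```python
assert moles_to_pressure(volume=10, moles=2, temperature=300) == 5
# Inverting: T = PV / (0.0821 * n) = (4.926 * 10) / (0.0821 * 2) = 300 K
assert pressure_and_volume_to_temperature(pressure=4.926, moles=2, volume=10) == 300
```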
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = ZeroShotClassificationPipeline(
model=_a , tokenizer=_a , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self, classifier, examples ) -> Tuple:
        """simple docstring"""
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics" )
        self.assertEqual(outputs, {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"] )
        self.assertEqual(outputs, {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"] )
        self.assertEqual(outputs, {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health" )
        self.assertEqual(
            outputs, {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ), 1.0 )

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"] )
        self.assertEqual(
            outputs, {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ), 1.0 )

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}" )
        self.assertEqual(outputs, {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"] )
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ], )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"] )
        self.assertEqual(
            outputs, [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ], )

        with self.assertRaises(ValueError ):
            classifier("", candidate_labels="politics" )

        with self.assertRaises(TypeError ):
            classifier(None, candidate_labels="politics" )

        with self.assertRaises(ValueError ):
            classifier("Who are you voting for in 2020?", candidate_labels="" )

        with self.assertRaises(TypeError ):
            classifier("Who are you voting for in 2020?", candidate_labels=None )

        with self.assertRaises(ValueError ):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", )

        with self.assertRaises(AttributeError ):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, )

        self.run_entailment_id(classifier )
    def run_entailment_id( self, zero_shot_classifier: Pipeline ) -> str:
        """simple docstring"""
        config = zero_shot_classifier.model.config
        original_labelaid = config.labelaid
        original_entailment = zero_shot_classifier.entailment_id

        config.labelaid = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1 )

        config.labelaid = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )

        config.labelaid = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0 )

        config.labelaid = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2 )

        config.labelaid = original_labelaid
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation( self ) -> Dict:
        """simple docstring"""
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 1_0_0 , candidate_labels=["politics", "public health", "science"] )
@require_torch
    def test_small_model_pt( self ) -> Any:
        """simple docstring"""
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf( self ) -> str:
        """simple docstring"""
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ), {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt( self ) -> Optional[Any]:
        """simple docstring"""
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_a , )
        self.assertEqual(
            nested_simplify(outputs ), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf( self ) -> Tuple:
        """simple docstring"""
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ), {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_a , )
        self.assertEqual(
            nested_simplify(outputs ), {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
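# Illustrative usage (not part of the original test suite): a minimal sketch
# of calling the pipeline exercised above directly, assuming network access
# to download the tiny checkpoint. Scores sum to 1 over the candidate labels
# because the pipeline softmaxes the entailment logits across labels.
if __name__ == "__main__":
    demo_classifier = pipeline(
        "zero-shot-classification",
        model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
    )
    demo_outputs = demo_classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
    )
    # Labels come back sorted by descending score.
    print(demo_outputs["labels"], demo_outputs["scores"])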
| 660 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
    def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ) -> Optional[int]:
        return TaConfig.from_pretrained("google/umt5-base" )
    def prepare_inputs_dict( self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ) -> int:
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ) -> Optional[int]:
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) -> Union[str, Any]:
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ), config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ), 4 )
    def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) -> Tuple:
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids, use_cache=False )

        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size )

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )

        output_from_no_past = model(next_input_ids )["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values )["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3 ) )
    def create_and_check_model_fpaa_forward( self, config, input_dict, ) -> Optional[int]:
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
# The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ) -> str:
        self.model_tester = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
    def test_export_to_onnx( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
    def test_model_fpaa_forward( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
    def test_generate_with_head_masking( self ) -> Tuple:
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device ),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device ),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device ),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device )

            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
    def test_disk_offload( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
    def test_small_integration_test( self ) -> List[Any]:
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False )
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling, EXPECTED_FILLING )
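# Illustrative usage (not part of the tests): a minimal sketch of running the
# checkpoint exercised above, assuming torch and network access are available.
# max_new_tokens below is an arbitrary demo value, not taken from the tests.
if __name__ == "__main__" and is_torch_available():
    demo_tokenizer = AutoTokenizer.from_pretrained("google/umt5-small" )
    demo_model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" )
    demo_batch = demo_tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt" )
    demo_ids = demo_model.generate(demo_batch.input_ids, max_new_tokens=20 )
    print(demo_tokenizer.batch_decode(demo_ids ) )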
| 693 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
    def __init__(self , p_stop=0.0_1 , max_length=1_0_0_0 ):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester ( unittest.TestCase ):
    def check_batch_sampler_shards(self , batch_sampler , expected , split_batches=False , even_batches=True ):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
    def test_batch_sampler_shards_with_no_splits(self ):
# Check the shards when the dataset is a round multiple of total batch size.
_UpperCAmelCase : Any = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a )
_UpperCAmelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCAmelCase : Dict = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(_a , _a )
_UpperCAmelCase : List[str] = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCAmelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(_a , _a )
_UpperCAmelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCAmelCase : Optional[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(_a , _a )
_UpperCAmelCase : int = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a )
# Check the shards when the dataset is very small.
_UpperCAmelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_a , _a )
_UpperCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : List[str] = [[], []]
self.check_batch_sampler_shards(_a , _a )
    def test_batch_sampler_shards_with_splits(self ):
# Check the shards when the dataset is a round multiple of batch size.
_UpperCAmelCase : Dict = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_UpperCAmelCase : Any = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCAmelCase : str = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_UpperCAmelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCAmelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_UpperCAmelCase : List[Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
# Check the shards when the dataset is very small.
_UpperCAmelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
_UpperCAmelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a )
    def test_batch_sampler_shards_with_no_splits_no_even(self ):
# Check the shards when the dataset is a round multiple of total batch size.
_UpperCAmelCase : List[str] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_UpperCAmelCase : int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCAmelCase : int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_UpperCAmelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCAmelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_UpperCAmelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCAmelCase : Optional[int] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_UpperCAmelCase : Optional[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
# Check the shards when the dataset is very small.
_UpperCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
_UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=_a )
_UpperCAmelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(_a , _a , even_batches=_a )
    def test_batch_sampler_shards_with_splits_no_even(self ):
# Check the shards when the dataset is a round multiple of batch size.
_UpperCAmelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_UpperCAmelCase : Optional[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=_a )
# Expected shouldn't change
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCAmelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_UpperCAmelCase : int = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCAmelCase : List[Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_UpperCAmelCase : Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
# Check the shards when the dataset is very small.
_UpperCAmelCase : int = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
_UpperCAmelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=_a )
_UpperCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(_a , _a , split_batches=_a , even_batches=_a )
    def test_batch_sampler_with_varying_batch_size(self ):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
    def check_iterable_dataset_shards(self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        random.seed(seed )
        reference = list(dataset )

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )

        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(observed ) < len(reference ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def test_iterable_dataset_shard(self ):
        seed = 4_2
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
    def test_skip_batch_sampler(self ):
        batch_sampler = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=False )
        new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )

    def test_skip_data_loader(self ):
        dataloader = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )

    def test_skip_first_batches(self ):
        dataloader = DataLoader(list(range(1_6 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )

    def test_end_of_dataloader(self ):
        dataloader = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

    def test_end_of_dataloader_dispatcher(self ):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
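# Illustrative usage (not part of the tests): a minimal sketch of the
# sharding behaviour checked above, run for two hypothetical processes.
# With the default even_batches=True the shorter shard wraps around to the
# start of the dataset so both processes yield the same number of batches.
if __name__ == "__main__":
    demo_sampler = BatchSampler(range(1_0 ) , batch_size=2 , drop_last=False )
    demo_shards = [
        BatchSamplerShard(demo_sampler , num_processes=2 , process_index=i ) for i in range(2 )
    ]
    for i, shard in enumerate(demo_shards ):
        # e.g. process 0 -> [[0, 1], [4, 5], [8, 9]]; process 1 wraps: [[2, 3], [6, 7], [0, 1]]
        print(f"process {i}: {list(shard )}" )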
| 414 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel , ckpt_dir: str , model_name: str ) -> None:
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )

    state_dict = model.state_dict()

    def to_tf_var_name(name: str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'''bert/{name}'''

    def create_tf_var(tensor: np.ndarray , name: str , session: tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )

        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )


def main(raw_args=None ) -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )

    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
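# Example invocation (illustration only; the script filename and the paths
# below are placeholders, not something this file defines):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt
#
# This writes ./tf_ckpt/bert_base_uncased.ckpt. Note the graph is built with
# tf.Session/tf.get_variable, i.e. TensorFlow 1.x-style (or tf.compat.v1) APIs.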
| 693 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
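# Illustrative usage (not part of this __init__ module): a minimal sketch of
# loading the pipeline re-exported above. It assumes torch, transformers and
# network access; the checkpoint names are common public ones chosen for the
# example, not something this module mandates.
if __name__ == "__main__":
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

    demo_controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    demo_pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=demo_controlnet
    )
    # demo_pipe(prompt, image=control_image) then generates an image that
    # follows the edges in control_image; see the pipeline for the signature.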
| 101 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor ( SequenceFeatureExtractor ):
    model_input_names = ["input_values", "padding_mask"]
    def __init__( self, feature_size = 1, sampling_rate = 2_40_00, padding_value = 0.0, chunk_length_s = None, overlap = None, **kwargs, ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length( self ) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride( self ) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self, raw_audio, padding = None, truncation = False, max_length = None, return_tensors = None, sampling_rate = None, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one." )
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray ):
            raw_audio = np.asarray(raw_audio, dtype=np.float32 )
        elif isinstance(raw_audio, np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask" )

        input_values = []
        for example in padded_inputs.pop("input_values" ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
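# Illustrative usage (not part of the class above): a minimal sketch with one
# second of synthetic mono audio. The constructor arguments mirror the
# defaults above; no pretrained checkpoint is needed for this example.
if __name__ == "__main__":
    demo_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=2_40_00 )
    demo_audio = np.zeros(2_40_00, dtype=np.float32 )  # one second of silence
    demo_features = demo_extractor(demo_audio, sampling_rate=2_40_00, return_tensors="np" )
    # Shape is (batch, channels, samples) after the transpose done in __call__.
    print(demo_features["input_values"].shape )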
| 693 | 0 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('''\t'''),
'newline': ord('''\r'''),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
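# A short note on the scheme above (illustration, not original code): arrow
# keys reuse the ASCII codes of the letters A-D, so they are tagged with
# ARROW_KEY_FLAG (1 << 8) to keep them distinct from printable input. The
# helper below is hypothetical and just shows how the flag is masked off.
def _demo_strip_arrow_flag(code: int) -> int:
    # e.g. KEYMAP["up"] == 65 + ARROW_KEY_FLAG; masking recovers ord("A") == 65.
    return code - ARROW_KEY_FLAG if code & ARROW_KEY_FLAG else code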
def get_raw_chars():
    """Returns raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["esc"] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 39 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline ( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self, unet, scheduler ) -> Dict:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )

    @torch.no_grad()
    def __call__( self, batch_size = 1, num_inference_steps = 20_00, generator = None, output_type = "pil", return_dict = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample, sigma_t ).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator ).prev_sample

            # prediction step
            model_output = model(sample, sigma_t ).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator )

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1 )
        sample = sample.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample )
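# Illustrative usage (not part of the pipeline class): a minimal sketch,
# assuming network access. "google/ncsnpp-church-256" is a public score-based
# checkpoint commonly paired with this pipeline, not something this file fixes.
if __name__ == "__main__":
    demo_pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256" )
    # Fewer steps than the default 20_00 trade sample quality for speed.
    demo_image = demo_pipe(num_inference_steps=20_00 ).images[0]
    demo_image.save("sde_ve_sample.png" )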
| 693 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor ( ImageGPTImageProcessor ):
    def __init__( self, *args, **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs )
| 227 |
def solution(n: int = 400_0000 ) -> int:
    """simple docstring"""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
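# A quick cross-check (illustration, not in the original): even Fibonacci
# numbers satisfy E(k) = 4 * E(k - 1) + E(k - 2), since every third Fibonacci
# number is even, so the same sum can be computed without touching odd terms.
def solution_even_only(n: int = 400_0000 ) -> int:
    total, a, b = 0, 2, 8
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total  # solution() == solution_even_only() == 4613732 for the default limit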
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 0 |
from pathlib import Path
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray , pta: np.ndarray , ptb: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    matrix = cva.getAffineTransform(pta , ptb )
    return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 352 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result


def next_greatest_element_fast(arr: list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result


def next_greatest_element(arr: list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
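    # A quick worked check (illustration, not in the original): for
    # [2, 7, 3, 5, 4, 6, 8] all three functions return [7, 8, 5, 6, 6, 8, -1].
    # The stack version is O(n) because each element is pushed and popped at
    # most once; the nested-loop versions are O(n^2) in the worst case.
    assert next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]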
| 693 | 0 |
'''simple docstring'''
def _a ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] ):
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def _a ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ):
"""simple docstring"""
if curr_ind == len(__snake_case ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(__snake_case ) ):
if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ):
# Insert current vertex into path as next transition
snake_case__ : Optional[int] = next_ver
# Validate created path
if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ):
return True
# Backtrack
snake_case__ : int = -1
return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Returns a Hamiltonian cycle through `graph` starting (and ending) at
    start_index, or an empty list if no such cycle exists."""
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
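

if __name__ == "__main__":
    # Small demo (added for illustration; the 5-vertex graph is an assumed
    # example, not data from the original module).
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    # Expected output: [0, 1, 2, 4, 3, 0]
    print(hamilton_cycle(example_graph))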
| 347 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
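
# Quick illustrative sketch (added; the tests below are the authoritative
# usage examples): the class supports plain Python indexing on top of the
# head/tail operations, e.g.
#   ll = LinkedList(); ll.insert_tail("a"); ll.insert_tail("b")
#   ll[0] = "A"          # __setitem__ rewrites the stored data in place
#   assert ll[0] == "A"  # __getitem__ returns the stored data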
def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList with a mix of payload types, including Node
    instances and None."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 693 | 0 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
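
# Illustrative note (added): special tokens also pass through the "</w>" branch
# first and are then restored, e.g. (toy values, not from the original script)
#   rewrite_dict_keys({"le@@": 5, "er": 7, "<s>": 0})
#   -> {"le": 5, "er</w>": 7, "<s>": 0}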
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
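
# Example invocation (illustrative paths; the checkpoint layout follows the
# fairseq WMT19 releases):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /path/to/wmt19.ru-en/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en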
| 447 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 693 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
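
# Example invocation (illustrative; the training script name is a placeholder):
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_args
# The launcher imports the target script as a module and spawns its `_mp_fn`
# entry point once per TPU core via torch_xla multiprocessing.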
| 556 |
def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of the perimeters of every almost-equilateral triangle
    (two equal sides, third side differing by one) with integral sides and
    integral area, whose perimeter does not exceed max_perimeter (Project
    Euler problem 94). Successive triangles follow a linear recurrence on the
    shared side length."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
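
# Worked check (added for illustration): the first almost-equilateral triangles
# with integral sides and area are (5, 5, 6), (17, 17, 16) and (65, 65, 66);
# their perimeters 16, 50 and 196 are exactly the first values the loop emits.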
if __name__ == "__main__":
    print(f"{solution() = }")
| 693 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue/mrpc dataset, using
    "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
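
# Example launches (illustrative; run `accelerate config` first for the
# distributed case):
#   python local_sgd.py --local_sgd_steps 8
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2
# Each worker takes `local_sgd_steps` optimizer steps on its own model copy
# before parameters are synchronized, trading sync frequency for throughput.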
| 208 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class Audio:
    """Audio feature that stores a path and/or raw bytes and can decode an
    audio file into a dict with the waveform, path and sampling rate."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        """Decode example audio file into audio data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced audio files as bytes into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
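
# A minimal usage sketch (added for illustration; the file name is a placeholder):
#   from datasets import Audio, Dataset
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}
# Only the path (or raw bytes) is stored in the Arrow table; decoding to a
# waveform happens lazily on access via decode_example above.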
| 693 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowercase : Optional[Any] =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
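        expected_words = lowercase  # alias the Tesseract word list above (kept verbatim) under a readable name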
lowercase : Union[str, Any] =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
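        expected_boxes = lowercase  # alias the Tesseract box list above under a readable name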
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 92 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 693 | 0 |
'''simple docstring'''
import os


def solution():
    """Find the greatest product of four adjacent numbers in the same
    direction (up, down, left, right, or diagonally) in the 20x20 grid
    stored in grid.txt (Project Euler problem 11)."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 660 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds.
    A composite number passes a single round with probability at most 1/4."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
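
# Illustrative check (added): 561 = 3 * 11 * 17 is a Carmichael number, which
# fools the plain Fermat test for every coprime base but is rejected here with
# overwhelming probability over the `prec` random rounds:
#   is_prime_big(561)  # -> False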
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 693 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=9_9 , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=9 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__=8 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0_0_2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=0 , lowerCAmelCase__=None , lowerCAmelCase__=None , ):
_UpperCAmelCase : int = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : int = encoder_seq_length
_UpperCAmelCase : List[str] = decoder_seq_length
# For common tests
_UpperCAmelCase : Optional[Any] = self.decoder_seq_length
_UpperCAmelCase : Tuple = is_training
_UpperCAmelCase : List[str] = use_attention_mask
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : int = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Dict = d_ff
_UpperCAmelCase : List[str] = relative_attention_num_buckets
_UpperCAmelCase : List[Any] = dropout_rate
_UpperCAmelCase : Optional[int] = initializer_factor
_UpperCAmelCase : Optional[Any] = eos_token_id
_UpperCAmelCase : List[Any] = pad_token_id
_UpperCAmelCase : List[Any] = decoder_start_token_id
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Dict = decoder_layers
def snake_case_ (self ):
return TaConfig.from_pretrained("""google/umt5-base""" )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ):
if attention_mask is None:
_UpperCAmelCase : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase : str = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase : Optional[Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a )
if decoder_head_mask is None:
_UpperCAmelCase : Any = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a )
if cross_attn_head_mask is None:
_UpperCAmelCase : str = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase : Union[str, Any] = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase : Union[str, Any] = self.get_config()
_UpperCAmelCase : int = config.num_attention_heads
_UpperCAmelCase : Union[str, Any] = self.prepare_inputs_dict(_a , _a , _a )
return config, input_dict
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case_ (self ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case_ (self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
_UpperCAmelCase : int = UMTaModel(config=_a )
model.to(_a )
model.eval()
_UpperCAmelCase : str = model(
input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , )
_UpperCAmelCase : List[Any] = model(input_ids=_a , decoder_input_ids=_a )
_UpperCAmelCase : Optional[int] = result.last_hidden_state
_UpperCAmelCase : Optional[int] = result.past_key_values
_UpperCAmelCase : Any = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
_UpperCAmelCase : Tuple = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
_UpperCAmelCase : Any = model(_a , use_cache=_a )
_UpperCAmelCase : List[Any] = model(_a )
_UpperCAmelCase : Tuple = model(_a , use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
_UpperCAmelCase , _UpperCAmelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : List[str] = model(_a )["""last_hidden_state"""]
_UpperCAmelCase : Optional[Any] = model(_a , past_key_values=_a )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , ):
_UpperCAmelCase : List[Any] = UMTaModel(config=_a ).to(_a ).half().eval()
_UpperCAmelCase : Any = model(**_a )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case : Union[str, Any] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case : str = True
snake_case : Tuple = False
snake_case : List[Any] = False
snake_case : Any = True
snake_case : List[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case : Optional[int] = [0.8, 0.9]
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=_a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def snake_case_ (self ):
_UpperCAmelCase : Dict = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = config_and_inputs[0]
_UpperCAmelCase : List[Any] = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
_UpperCAmelCase : List[str] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_a ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
}
for attn_name, (name, mask) in zip(_a , head_masking.items() ):
_UpperCAmelCase : Optional[int] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase : List[Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=_a )
_UpperCAmelCase : int = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase : Tuple = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def snake_case_ (self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_a ).to(_a )
_UpperCAmelCase : int = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_a , legacy=_a )
_UpperCAmelCase : Any = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase : Optional[Any] = tokenizer(_a , return_tensors="""pt""" , padding=_a ).input_ids
# fmt: off
_UpperCAmelCase : Tuple = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a , _a )
_UpperCAmelCase : Dict = model.generate(input_ids.to(_a ) )
_UpperCAmelCase : Dict = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase : List[str] = tokenizer.batch_decode(_a )
self.assertEqual(_a , _a )
| 414 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a single Gabor filter kernel of (odd) size ksize x ksize."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
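    # Quick sanity check of the kernel builder (illustrative): an even ksize is
    # bumped to the next odd size above, so ksize=10 yields an 11x11 kernel.
    assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)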
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : Optional[Any] ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Tuple =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase__ : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
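# For illustration, a minimal sketch of the lazy-module pattern used above
# (an assumption-level simplification, not the actual transformers._LazyModule):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # the submodule is imported only on first attribute access
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)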
| 101 |
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 693 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ):
model.train()
snake_case_ = model(__snake_case )
snake_case_ = F.mse_loss(__snake_case , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__snake_case )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
set_seed(42 )
snake_case_ = RegressionModel()
snake_case_ = deepcopy(__snake_case )
snake_case_ = RegressionDataset(length=80 )
snake_case_ = DataLoader(__snake_case , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case_ = AdamW(params=model.parameters() , lr=1E-3 )
snake_case_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
snake_case_ = LambdaLR(__snake_case , lr_lambda=lambda SCREAMING_SNAKE_CASE__ : epoch**0.65 )
snake_case_ = LambdaLR(__snake_case , lr_lambda=lambda SCREAMING_SNAKE_CASE__ : epoch**0.65 )
# Make a copy of `model`
if sched:
snake_case_, snake_case_, snake_case_, snake_case_ = accelerator.prepare(__snake_case , __snake_case , __snake_case , __snake_case )
else:
snake_case_, snake_case_ = accelerator.prepare(__snake_case , __snake_case )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_, snake_case_, snake_case_ = get_training_setup(__snake_case )
# Use a single batch
snake_case_, snake_case_ = next(iter(__snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_, snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_, snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
else:
# Sync grads
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(__snake_case ) )]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_, snake_case_, snake_case_ = get_training_setup(__snake_case )
# Use a single batch
snake_case_, snake_case_ = next(iter(__snake_case ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_, snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_, snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
else:
# Sync grads
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(__snake_case ) )]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
snake_case_ = Accelerator(
split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_, snake_case_, snake_case_ = get_training_setup(__snake_case )
for iteration, batch in enumerate(__snake_case ):
snake_case_, snake_case_ = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_, snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_, snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__snake_case ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(__snake_case ) )]
GradientState._reset_state()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
snake_case_ = Accelerator(
split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = get_training_setup(__snake_case , __snake_case )
for iteration, batch in enumerate(__snake_case ):
snake_case_, snake_case_ = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_, snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_, snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__snake_case )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__snake_case ):
step_model(__snake_case , __snake_case , __snake_case , __snake_case )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
snake_case_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__snake_case ))
if accelerator.num_processes > 1:
check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __SCREAMING_SNAKE_CASE ():
snake_case_ = Accelerator()
snake_case_ = RegressionDataset(length=80 )
snake_case_ = DataLoader(__snake_case , batch_size=16 )
snake_case_ = RegressionDataset(length=96 )
snake_case_ = DataLoader(__snake_case , batch_size=16 )
snake_case_, snake_case_ = accelerator.prepare(__snake_case , __snake_case )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case )
if iteration < len(__snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__snake_case ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case )
if batch_num < len(__snake_case ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __SCREAMING_SNAKE_CASE ():
snake_case_ = Accelerator()
snake_case_ = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(__snake_case )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(__snake_case )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__snake_case , __snake_case )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__snake_case , __snake_case )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
main()
if __name__ == "__main__":
main() | 39 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
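# A minimal standalone sketch of the same pipeline outside the test harness
# (assumption: network access to download the openai/clip-vit-base-patch32
# weights; the image path is the COCO fixture used in the tests above).
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
# `predictions` is a list of {"score": float, "label": str} dicts sorted by descending score.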
| 693 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = '▁'
UpperCamelCase__ = {'vocab_file': 'spiece.model'}
UpperCamelCase__ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
UpperCamelCase__ = {
'google/pegasus-xsum': 5_1_2,
}
UpperCamelCase__ = logging.get_logger(__name__)
class a__ ( __SCREAMING_SNAKE_CASE ):
    snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict ,a__ : int ,a__ : int="<pad>" ,a__ : Optional[Any]="</s>" ,a__ : Any="<unk>" ,a__ : List[Any]="<mask_2>" ,a__ : int="<mask_1>" ,a__ : Dict=None ,a__ : Any=103 ,a__ : Dict = None ,**a__ : str ,) -> None:
"""simple docstring"""
_lowerCAmelCase:List[Any] = offset
if additional_special_tokens is not None:
if not isinstance(_a ,_a):
raise TypeError(
F'additional_special_tokens should be of type {type(_a)}, but is'
F' {type(_a)}')
_lowerCAmelCase:List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(_a) ,self.offset - 1)
]
if len(set(_a)) != len(_a):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
_lowerCAmelCase:List[str] = additional_special_tokens_extended
else:
_lowerCAmelCase:List[str] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 ,self.offset)]
_lowerCAmelCase:Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_a ,unk_token=_a ,mask_token=_a ,pad_token=_a ,mask_token_sent=_a ,offset=_a ,additional_special_tokens=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_lowerCAmelCase:int = mask_token_sent
_lowerCAmelCase:Dict = vocab_file
_lowerCAmelCase:Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_a)
# add special tokens to encoder dict
_lowerCAmelCase:str = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 ,self.offset - 1)})
_lowerCAmelCase:List[str] = {v: k for k, v in self.encoder.items()}
@property
def __UpperCamelCase ( self : str) -> int:
"""simple docstring"""
return len(self.sp_model) + self.offset
def __UpperCamelCase ( self : Optional[Any]) -> Dict[str, int]:
"""simple docstring"""
_lowerCAmelCase:List[str] = {self.convert_ids_to_tokens(_a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Any = self.__dict__.copy()
_lowerCAmelCase:List[str] = None
return state
def __setstate__( self : Union[str, Any] ,a__ : List[Any]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Any = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
_lowerCAmelCase:Optional[Any] = {}
_lowerCAmelCase:Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCamelCase ( self : Optional[int] ,a__ : Tuple) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_a ,out_type=_a)
def __UpperCamelCase ( self : List[str] ,a__ : Tuple) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_lowerCAmelCase:List[str] = self.sp_model.piece_to_id(_a)
return sp_id + self.offset
def __UpperCamelCase ( self : str ,a__ : Union[str, Any]) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_lowerCAmelCase:Tuple = self.sp_model.IdToPiece(index - self.offset)
return token
def __UpperCamelCase ( self : Optional[int] ,a__ : List[str]) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:int = []
_lowerCAmelCase:Dict = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a) + token
_lowerCAmelCase:Union[str, Any] = []
else:
current_sub_tokens.append(_a)
out_string += self.sp_model.decode(_a)
return out_string.strip()
def __UpperCamelCase ( self : Union[str, Any] ,a__ : str=False) -> Union[str, Any]:
"""simple docstring"""
return 1
def __UpperCamelCase ( self : List[str] ,a__ : List[Any]) -> int:
"""simple docstring"""
_lowerCAmelCase:Dict = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __UpperCamelCase ( self : int ,a__ : str ,a__ : Dict = None ,a__ : Any = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_a)
elif token_ids_a is None:
return self._special_token_mask(_a) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def __UpperCamelCase ( self : Optional[int] ,a__ : List[str] ,a__ : str=None) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict ,a__ : int ,a__ : str = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_a):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
_lowerCAmelCase:Optional[Any] = os.path.join(
_a ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,_a)
elif not os.path.isfile(self.vocab_file):
with open(_a ,'''wb''') as fi:
_lowerCAmelCase:Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_a)
return (out_vocab_file,)
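# A standalone sketch of the `_special_token_mask` logic above, using assumed
# ids (0=pad, 1=eos, 2/3=mask tokens) purely for illustration; the real method
# also removes the <unk> id from the special set, as noted in its comment.
all_special_ids = {0, 1, 2, 3}
seq = [0, 17, 3, 42, 1]
assert [1 if x in all_special_ids else 0 for x in seq] == [1, 0, 1, 0, 1]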
| 227 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the maximizing player of a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")

    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
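    # Hand-checked 4-leaf example of the recursion above:
    # max(min(3, 5), min(2, 9)) == max(3, 2) == 3
    assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3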
| 693 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCAmelCase : Any = 'swin'
UpperCAmelCase : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , snake_case : Any=224 , snake_case : Dict=4 , snake_case : Union[str, Any]=3 , snake_case : Any=96 , snake_case : List[str]=[2, 2, 6, 2] , snake_case : List[str]=[3, 6, 12, 24] , snake_case : str=7 , snake_case : str=4.0 , snake_case : Optional[Any]=True , snake_case : Any=0.0 , snake_case : List[Any]=0.0 , snake_case : List[str]=0.1 , snake_case : Any="gelu" , snake_case : Optional[int]=False , snake_case : str=0.02 , snake_case : str=1E-5 , snake_case : str=32 , snake_case : Optional[int]=None , snake_case : Tuple=None , **snake_case : Dict , ):
'''simple docstring'''
super().__init__(**_a )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : List[Any] = embed_dim
SCREAMING_SNAKE_CASE : Union[str, Any] = depths
SCREAMING_SNAKE_CASE : Any = len(_a )
SCREAMING_SNAKE_CASE : int = num_heads
SCREAMING_SNAKE_CASE : List[str] = window_size
SCREAMING_SNAKE_CASE : Any = mlp_ratio
SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : Union[str, Any] = int(embed_dim * 2 ** (len(_a ) - 1) )
SCREAMING_SNAKE_CASE : Tuple = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(_a ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class lowercase ( __SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCAmelCase : Optional[int] = version.parse('1.11')
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return 1E-4 | 352 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
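    # Round-trip sanity check for the helpers above:
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"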
| 693 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ : List[Any] = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = ['PoolFormerFeatureExtractor']
lowerCAmelCase__ : str = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 347 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Return True if all elements of the iterable are equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first of the n consecutive integers found by run(n)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
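    # Small worked case for the search above: the first two consecutive integers
    # with two distinct prime factors each are 14 (2 * 7) and 15 (3 * 5).
    assert run(2) == [14, 15]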
| 693 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_a : Optional[List[str]] = None
_a : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_a : int = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_ :
'''simple docstring'''
__lowerCAmelCase : Any = True
__lowerCAmelCase : Optional[Any] = None
# Automatically constructed
__lowerCAmelCase : Union[str, Any] = "PIL.Image.Image"
__lowerCAmelCase : Tuple = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__lowerCAmelCase : Any = field(default="Image" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Tuple:
"""simple docstring"""
return self.pa_type
def snake_case_ ( self , a_ ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_a , _a ):
UpperCAmelCase = np.array(_a )
if isinstance(_a , _a ):
return {"path": value, "bytes": None}
elif isinstance(_a , _a ):
return {"path": None, "bytes": value}
elif isinstance(_a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_a )
elif isinstance(_a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_a )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case_ ( self , a_ , a_=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
UpperCAmelCase = {}
UpperCAmelCase , UpperCAmelCase = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_a ):
UpperCAmelCase = PIL.Image.open(_a )
else:
UpperCAmelCase = path.split('::' )[-1]
try:
UpperCAmelCase = string_to_dict(_a , config.HUB_DATASETS_URL )['repo_id']
UpperCAmelCase = token_per_repo_id.get(_a )
except ValueError:
UpperCAmelCase = None
with xopen(_a , 'rb' , use_auth_token=_a ) as f:
UpperCAmelCase = BytesIO(f.read() )
UpperCAmelCase = PIL.Image.open(bytes_ )
else:
UpperCAmelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def snake_case_ ( self , a_ ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCAmelCase = pa.array([None] * len(_a ) , type=pa.binary() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase = pa.array([None] * len(_a ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
UpperCAmelCase = storage.field('bytes' )
else:
UpperCAmelCase = pa.array([None] * len(_a ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
UpperCAmelCase = storage.field('path' )
else:
UpperCAmelCase = pa.array([None] * len(_a ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase = pa.array(
[encode_np_array(np.array(_a ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCAmelCase = pa.array([None] * len(_a ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
def snake_case_ ( self , a_ ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ ):
with xopen(_a , 'rb' ) as f:
UpperCAmelCase = f.read()
return bytes_
UpperCAmelCase = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_a , self.pa_type )
def lowerCamelCase__ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:  # name confirmed by the call site below
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image"):  # upstream `datasets` name; restored here as an assumption
    if hasattr(image, 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray):  # name confirmed by the call site in the struct cast above
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype) )
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
):  # upstream `datasets` name; restored here as an assumption
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
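# Usage sketch for the helpers above (assumes Pillow and numpy are installed;
# the array below is illustrative):
#
#   import numpy as np
#   arr = np.zeros((16, 16, 3), dtype=np.uint8)
#   encode_np_array(arr)                        # -> {"path": None, "bytes": b"\x89PNG..."}
#   "PNG" in list_image_compression_formats()   # True once PIL.Image.init() has run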
| 447 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
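# Example invocation (illustrative arguments; --checkpoint_url must be a direct
# Google Drive download link, as the help text above notes):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "https://drive.google.com/u/1/uc?id=...&export=download" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base
#
# The script filename is an assumption; run whichever file holds this __main__ block.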
| 693 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class _a ( __SCREAMING_SNAKE_CASE ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
A = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
A = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
A = '''question'''
A = '''context'''
A = '''answers'''
@property
def __snake_case (self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
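    # Usage sketch (this template is QuestionAnsweringExtractive in the upstream
    # `datasets` source; the column names below are illustrative):
    #
    #   template = QuestionAnsweringExtractive(question_column="q", context_column="ctx")
    #   template.column_mapping  # -> {"q": "question", "ctx": "context", "answers": "answers"}
    #
    # Because the dataclass is frozen, custom column names must be supplied at
    # construction time rather than assigned afterwards.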
| 556 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
def __init__( self, *_a, **_a ) -> None:
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead.", _a, )
super().__init__(*_a, **_a )
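# Usage sketch: instantiating the deprecated class still yields a fully
# functional CLIPImageProcessor; it just warns first.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       CLIPFeatureExtractor()
#   assert any("CLIPImageProcessor" in str(w.message) for w in caught)
#
# (CLIPFeatureExtractor is the upstream name of the class defined above.)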
| 693 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case ( a_ : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def snake_case ( a_ : np.ndarray , a_ : np.ndarray , a_ : np.ndarray ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase_ : int = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__snake_case , __snake_case )
# Predict target for test data
UpperCamelCase_ : Dict = xgb.predict(__snake_case )
UpperCamelCase_ : int = predictions.reshape(len(__snake_case ) , 1 )
return predictions
def snake_case ( ) -> None:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = fetch_california_housing()
UpperCamelCase_ , UpperCamelCase_ : Any = data_handling(__snake_case )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : str = train_test_split(
__snake_case , __snake_case , test_size=0.25 , random_state=1 )
UpperCamelCase_ : List[str] = xgboost(__snake_case , __snake_case , __snake_case )
# Error printing
print(f"Mean Absolute Error : {mean_absolute_error(__snake_case , __snake_case )}" )
print(f"Mean Square Error : {mean_squared_error(__snake_case , __snake_case )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 208 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 1_0000) -> int:
    """Sum all amicable numbers below `limit` (Project Euler problem 21)."""
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
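# Sanity checks: 220 and 284 form the classic amicable pair
# (sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220), and
# solution(10000) == 31626, the published answer to Project Euler problem 21.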
| 693 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['MobileViTFeatureExtractor']
UpperCamelCase_ = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
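# Usage sketch: with _LazyModule, `from transformers import MobileViTModel`
# defers importing .modeling_mobilevit until the attribute is first touched,
# and only succeeds when torch is installed (per the availability guards above).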
| 92 |
# Descriptive names below are reconstructions of the mangled originals (which
# collided on "_A" and reused parameter names); R = 0.0821 L*atm/(mol*K).
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0_8_2_1 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
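# Worked examples for the functions above (values check out by hand):
#   molarity_to_normality(2, 3.1, 0.31)             -> 20   (3.1 / 0.31 * 2)
#   moles_to_pressure(0.82, 3, 300)                 -> 90   (3 * 0.0821 * 300 / 0.82)
#   moles_to_volume(0.82, 3, 300)                   -> 90
#   pressure_and_volume_to_temperature(0.82, 1, 2)  -> 20   (0.82 * 2 / 0.0821)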
| 693 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
__snake_case : List[Any] = 'src/transformers'
# Matches is_xxx_available()
__snake_case : Dict = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__snake_case : str = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__snake_case : Union[str, Any] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__snake_case : Optional[int] = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__snake_case : Dict = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__snake_case : Optional[int] = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__snake_case : str = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__snake_case : Any = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__snake_case : int = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__snake_case : Union[str, Any] = re.compile(R'''^\s*try:''')
# Catches a line with else:
__snake_case : Dict = re.compile(R'''^\s*else:''')
def find_backend(line):  # name confirmed by the call sites in parse_init below
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def lowerCamelCase__ ( A_ ):
with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = 0
while line_index < len(__snake_case ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__snake_case ):
UpperCAmelCase_ = _re_one_line_import_struct.search(__snake_case ).groups()[0]
            UpperCAmelCase_ = re.findall(R"\[([^\]]+)\]" , __snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
UpperCAmelCase_ = _re_import_struct_key_value.search(__snake_case )
if single_line_import_search is not None:
UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__snake_case ) > 0]
objects.extend(__snake_case )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
UpperCAmelCase_ = lines[line_index]
if _re_import_struct_add_one.search(__snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(__snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(__snake_case ) is not None:
UpperCAmelCase_ = _re_import_struct_add_many.search(__snake_case ).groups()[0].split(", " )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(__snake_case ) > 0]
objects.extend(__snake_case )
elif _re_between_brackets.search(__snake_case ) is not None:
UpperCAmelCase_ = _re_between_brackets.search(__snake_case ).groups()[0].split(", " )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(__snake_case ) > 0]
objects.extend(__snake_case )
elif _re_quote_object.search(__snake_case ) is not None:
objects.append(_re_quote_object.search(__snake_case ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ = []
while (
line_index < len(__snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):  # name confirmed by the call site below
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ = []
for key in import_dict_objects.keys():
UpperCAmelCase_ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCAmelCase_ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ = "base imports" if key == "none" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def lowerCamelCase__ ( ):
UpperCAmelCase_ = []
for root, _, files in os.walk(__snake_case ):
if "__init__.py" in files:
UpperCAmelCase_ = os.path.join(__snake_case , "__init__.py" )
UpperCAmelCase_ = parse_init(__snake_case )
if objects is not None:
UpperCAmelCase_ = analyze_results(*__snake_case )
if len(__snake_case ) > 0:
UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(__snake_case ) )
if len(__snake_case ) > 0:
raise ValueError("\n\n".join(__snake_case ) )
def lowerCamelCase__ ( ):
UpperCAmelCase_ = []
for path, directories, files in os.walk(__snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(__snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__snake_case ) / folder).glob("*.py" ) ) ) == 0:
continue
UpperCAmelCase_ = str((Path(__snake_case ) / folder).relative_to(__snake_case ) )
UpperCAmelCase_ = short_path.replace(os.path.sep , "." )
submodules.append(__snake_case )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase_ = str((Path(__snake_case ) / fname).relative_to(__snake_case ) )
UpperCAmelCase_ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__snake_case )
return submodules
__snake_case : Tuple = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def lowerCamelCase__ ( ):
UpperCAmelCase_ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(__snake_case , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
UpperCAmelCase_ = spec.loader.load_module()
UpperCAmelCase_ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__snake_case ) > 0:
UpperCAmelCase_ = "\n".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
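# Typical invocation from the repository root (assuming the file lives at
# utils/check_inits.py as in the transformers repo):
#
#   python utils/check_inits.py
#
# It raises a ValueError listing any drift between _import_structure and the
# TYPE_CHECKING imports, or any unregistered submodule.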
| 660 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = encoder_seq_length
__SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
__SCREAMING_SNAKE_CASE = self.decoder_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = decoder_layers
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig.from_pretrained("google/umt5-base" )
def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
if attention_mask is None:
__SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = config.num_attention_heads
__SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
return config, input_dict
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig(
            vocab_size=1_66,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
__SCREAMING_SNAKE_CASE = result.last_hidden_state
__SCREAMING_SNAKE_CASE = result.past_key_values
__SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
__SCREAMING_SNAKE_CASE = model(_a )
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )
def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
__SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ =[0.8, 0.9]
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
                _a,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f'''{tmpdirname}/t5_test.onnx''',
                export_params=_a,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = config_and_inputs[0]
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
__SCREAMING_SNAKE_CASE = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
}
for attn_name, (name, mask) in zip(_a, head_masking.items() ):
__SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_heads, device=_a )
__SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, )
# We check the state of decoder_attentions and cross_attentions just from the last step
__SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
| 693 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : Union[str, Any] = [0]
_UpperCAmelCase : Tuple = [0]
_UpperCAmelCase : List[str] = len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 0 )
_UpperCAmelCase : Optional[Any] = [6_0]
_UpperCAmelCase : Any = [1_0]
_UpperCAmelCase : List[Any] = len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 0 )
def snake_case_ (self ):
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Optional[Any] = [1, 2, 3]
_UpperCAmelCase : Any = [3, 2, 1]
_UpperCAmelCase : Tuple = len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 5 )
def snake_case_ (self ):
_UpperCAmelCase : Tuple = 5_0
_UpperCAmelCase : Dict = [6_0, 1_0_0, 1_2_0]
_UpperCAmelCase : Tuple = [1_0, 2_0, 3_0]
_UpperCAmelCase : Any = len(_a )
self.assertEqual(k.knapsack(_a , _a , _a , _a ) , 2_2_0 )
if __name__ == "__main__":
unittest.main()
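# For reference, a minimal recursive 0/1 knapsack consistent with these tests
# (a hypothetical sketch; the implementation under test lives in
# knapsack/knapsack.py and its exact argument order may differ):
#
#   def knapsack(capacity, weights, values, counter):
#       if counter == 0 or capacity == 0:
#           return 0
#       if weights[counter - 1] > capacity:
#           return knapsack(capacity, weights, values, counter - 1)
#       return max(
#           values[counter - 1]
#           + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#           knapsack(capacity, weights, values, counter - 1),
#       )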
| 414 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _A ( __snake_case :BertModel , __snake_case :str , __snake_case :str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
__SCREAMING_SNAKE_CASE = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__snake_case ):
os.makedirs(__snake_case )
__SCREAMING_SNAKE_CASE = model.state_dict()
def to_tf_var_name(__snake_case :str ):
for patt, repl in iter(__snake_case ):
__SCREAMING_SNAKE_CASE = name.replace(__snake_case , __snake_case )
return f'''bert/{name}'''
def create_tf_var(__snake_case :np.ndarray , __snake_case :str , __snake_case :tf.Session ):
__SCREAMING_SNAKE_CASE = tf.dtypes.as_dtype(tensor.dtype )
__SCREAMING_SNAKE_CASE = tf.get_variable(dtype=__snake_case , shape=tensor.shape , name=__snake_case , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__snake_case )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__SCREAMING_SNAKE_CASE = to_tf_var_name(__snake_case )
__SCREAMING_SNAKE_CASE = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__SCREAMING_SNAKE_CASE = torch_tensor.T
__SCREAMING_SNAKE_CASE = create_tf_var(tensor=__snake_case , name=__snake_case , session=__snake_case )
tf.keras.backend.set_value(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = session.run(__snake_case )
print(f'''Successfully created {tf_name}: {np.allclose(__snake_case , __snake_case )}''' )
__SCREAMING_SNAKE_CASE = tf.train.Saver(tf.trainable_variables() )
saver.save(__snake_case , os.path.join(__snake_case , model_name.replace("-" , "_" ) + ".ckpt" ) )
def _A ( __snake_case :str=None ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__snake_case , required=__snake_case , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=__snake_case , default=__snake_case , required=__snake_case , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=__snake_case , required=__snake_case , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=__snake_case , required=__snake_case , help="Directory in which to save tensorflow model" )
__SCREAMING_SNAKE_CASE = parser.parse_args(__snake_case )
__SCREAMING_SNAKE_CASE = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
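# Example invocation (illustrative paths):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert-base-uncased/pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt
#
# The script filename is an assumption; run whichever file holds this __main__ block.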
| 693 | 0 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
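# The line above is a classic Python quine: %r substitutes the template string's
# own repr into itself while %% collapses to a literal %, so the program prints
# its exact source.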
| 101 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_snake_case : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""]
def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str:
super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a )
__SCREAMING_SNAKE_CASE = chunk_length_s
__SCREAMING_SNAKE_CASE = overlap
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = bool(
isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_a, np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa )
elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(_a ).T]
# verify inputs are valid
for idx, example in enumerate(_a ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE = "max_length"
else:
__SCREAMING_SNAKE_CASE = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE = self.pad(
_a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, )
if padding:
__SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" )
__SCREAMING_SNAKE_CASE = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a )
return padded_inputs
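# Usage sketch (the class above is EncodecFeatureExtractor upstream; mono audio
# at the 24 kHz default, all values illustrative):
#
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#   audio = np.zeros(24_000, dtype=np.float32)   # one second of silence
#   batch = fe(audio, sampling_rate=24_000, return_tensors="np")
#   batch["input_values"].shape                   # (1, 1, 24_000)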
| 693 | 0 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two Transformer2DModel blocks whose outputs are mixed, one per conditioning stream."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None,
                 num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32,
                 cross_attention_dim: Optional[int] = None, attention_bias: bool = False,
                 sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None,
                 activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
                cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
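# Minimal standalone sketch (an addition, not part of the original module) of the mixing
# rule in forward(): each transformer contributes a residual (encoded - input) for its own
# slice of condition tokens, and the two residuals are blended with `mix_ratio` before the
# input is added back.
if __name__ == "__main__":
    import torch

    input_states = torch.randn(1, 4, 8, 8)
    encoded_a, encoded_b = torch.randn_like(input_states), torch.randn_like(input_states)
    mix_ratio = 0.5
    residuals = [encoded_a - input_states, encoded_b - input_states]
    output_states = residuals[0] * mix_ratio + residuals[1] * (1 - mix_ratio) + input_states
    # with mix_ratio == 0.5 this reduces to the plain average of the two encoded states
    assert torch.allclose(output_states, (encoded_a + encoded_b) / 2, atol=1e-6)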
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the variance-exploding SDE scheduler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
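# Hedged usage sketch (an addition, not part of the original file); the checkpoint name
# below is an assumption — any UNet2DModel/ScoreSdeVeScheduler checkpoint would do.
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")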
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class a__ ( __SCREAMING_SNAKE_CASE ):
def __UpperCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __UpperCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_a)
def __UpperCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
_lowerCAmelCase:Dict = self._create_example_records()
_lowerCAmelCase:Union[str, Any] = Dataset.from_list(_a)
self.assertListEqual(dset.column_names ,['''col_1''', '''col_2'''])
for i, r in enumerate(_a):
self.assertDictEqual(_a ,example_records[i])
def __UpperCamelCase ( self : int) -> int:
"""simple docstring"""
_lowerCAmelCase:str = self._create_example_records()
_lowerCAmelCase:Any = Dataset.from_list(_a)
_lowerCAmelCase:Tuple = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info ,dset_from_dict.info)
def __UpperCamelCase ( self : Optional[Any]) -> int: # checks what happens with missing columns
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
_lowerCAmelCase:str = Dataset.from_list(_a)
self.assertDictEqual(dset[0] ,{'''col_1''': 1})
self.assertDictEqual(dset[1] ,{'''col_1''': None}) # NB: first record is used for columns
def __UpperCamelCase ( self : int) -> Optional[int]: # checks if the type can be inferred from the second record
"""simple docstring"""
_lowerCAmelCase:List[Any] = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
_lowerCAmelCase:int = Dataset.from_list(_a)
self.assertEqual(dset.info.features['''col_1'''] ,Sequence(Value('''int64''')))
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = Dataset.from_list([])
self.assertEqual(len(_a) ,0)
self.assertListEqual(dset.column_names ,[])
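# Quick usage sketch of the API under test (an addition, not part of the test file):
# Dataset.from_list infers the schema from the first record, as the tests above verify.
#
#     from datasets import Dataset
#
#     dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#     print(dset.column_names)  # ['col_1', 'col_2']
#     print(dset[0])            # {'col_1': 3, 'col_2': 'a'}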
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n (Project Euler problem 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
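# Hypothetical alternative (an addition, not from the original file): every third
# Fibonacci number is even, so the even terms alone satisfy E(k) = 4*E(k-1) + E(k-2)
# with E(1) = 2 and E(2) = 8, letting us skip the odd terms entirely.
def solution_even_only(n: int = 4_000_000) -> int:
    prev, curr = 2, 8
    total = 0
    while prev <= n:
        total += prev
        prev, curr = curr, 4 * curr + prev
    return total  # equals solution(n) for all n >= 0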
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
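# Illustrative sanity check (an addition, not part of the original scheduler): the
# `discrete_sigmas` grid built in `set_sigmas` is geometric — consecutive sigmas differ
# by the constant factor (sigma_max / sigma_min) ** (1 / (num_steps - 1)).
def _demo_geometric_sigmas(sigma_min: float = 0.01, sigma_max: float = 1348.0, num_steps: int = 10) -> None:
    sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_steps))
    ratios = sigmas[1:] / sigmas[:-1]
    expected = (sigma_max / sigma_min) ** (1 / (num_steps - 1))
    assert torch.allclose(ratios, torch.full_like(ratios, expected), rtol=1e-4)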
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element of the array
    by checking all subsequent elements: O(n^2).

    >>> next_greatest_element_slow(arr) == expect
    True
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow() but uses enumerate() and a slice
    instead of explicit indices: still O(n^2).

    >>> next_greatest_element_fast(arr) == expect
    True
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the NGE for each element with a single reverse pass and a stack: O(n).

    >>> next_greatest_element(arr) == expect
    True
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
"""simple docstring"""
snake_case__ : Union[str, Any] = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
snake_case__ : Any = False
if "finetuned" in model_name:
snake_case__ : str = '''huggingface/label-files'''
if "kinetics" in model_name:
snake_case__ : Tuple = 4_00
snake_case__ : Optional[int] = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
snake_case__ : Optional[int] = 1_74
snake_case__ : Union[str, Any] = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
snake_case__ : str = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ : Optional[int] = {int(__snake_case ): v for k, v in idalabel.items()}
snake_case__ : Union[str, Any] = idalabel
snake_case__ : Any = {v: k for k, v in idalabel.items()}
return config
def set_architecture_configs(model_name, config):
"""simple docstring"""
if "small" in model_name:
snake_case__ : Dict = 3_84
snake_case__ : List[str] = 15_36
snake_case__ : List[Any] = 12
snake_case__ : Union[str, Any] = 16
snake_case__ : Union[str, Any] = 12
snake_case__ : Optional[int] = 3
snake_case__ : Dict = 1_92
snake_case__ : int = 7_68
elif "large" in model_name:
snake_case__ : int = 10_24
snake_case__ : Union[str, Any] = 40_96
snake_case__ : Optional[Any] = 24
snake_case__ : List[Any] = 16
snake_case__ : Optional[Any] = 12
snake_case__ : Any = 8
snake_case__ : Optional[int] = 5_12
snake_case__ : Optional[int] = 20_48
elif "huge" in model_name:
snake_case__ : Optional[int] = 12_80
snake_case__ : Any = 51_20
snake_case__ : Optional[int] = 32
snake_case__ : Union[str, Any] = 16
snake_case__ : Optional[Any] = 12
snake_case__ : int = 8
snake_case__ : str = 6_40
snake_case__ : Any = 25_60
elif "base" not in model_name:
raise ValueError('''Model name should include either \"small\", \"base\", \"large\", or \"huge\"''' )
def rename_key(name):
"""simple docstring"""
if "encoder." in name:
snake_case__ : Union[str, Any] = name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
snake_case__ : Tuple = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
snake_case__ : str = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
snake_case__ : Union[str, Any] = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
snake_case__ : str = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
snake_case__ : List[str] = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
snake_case__ : List[str] = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
snake_case__ : Tuple = name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
snake_case__ : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
snake_case__ : Optional[Any] = name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
snake_case__ : Dict = name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
snake_case__ : str = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case__ : Any = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case__ : Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case__ : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
snake_case__ : Optional[int] = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
snake_case__ : Union[str, Any] = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
snake_case__ : Dict = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case__ : Any = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case__ : Any = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
snake_case__ : Dict = name.replace('''head''' , '''classifier''' )
return name
def convert_state_dict(orig_state_dict, config):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case__ : Optional[int] = orig_state_dict.pop(__snake_case )
if key.startswith('''encoder.''' ):
snake_case__ : Dict = key.replace('''encoder.''' , '''''' )
if "qkv" in key:
snake_case__ : Optional[int] = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
snake_case__ : Any = config.decoder_hidden_size
snake_case__ : str = int(key_split[2] )
snake_case__ : str = '''decoder.decoder_layers.'''
if "weight" in key:
snake_case__ : int = val[:dim, :]
snake_case__ : Union[str, Any] = val[dim : dim * 2, :]
snake_case__ : List[str] = val[-dim:, :]
else:
snake_case__ : Union[str, Any] = config.hidden_size
snake_case__ : Dict = int(key_split[1] )
snake_case__ : int = '''videomae.encoder.layer.'''
if "weight" in key:
snake_case__ : List[str] = val[:dim, :]
snake_case__ : List[Any] = val[dim : dim * 2, :]
snake_case__ : Union[str, Any] = val[-dim:, :]
else:
snake_case__ : Optional[Any] = val
return orig_state_dict
def prepare_video():
"""simple docstring"""
snake_case__ : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
snake_case__ : List[Any] = np.load(__snake_case )
return list(__snake_case )
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
"""simple docstring"""
snake_case__ : int = get_videomae_config(__snake_case )
if "finetuned" in model_name:
snake_case__ : Any = VideoMAEForVideoClassification(__snake_case )
else:
snake_case__ : List[str] = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
snake_case__ : str = '''pytorch_model.bin'''
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
snake_case__ : Any = torch.load(__snake_case , map_location='''cpu''' )
if "model" in files:
snake_case__ : int = files['''model''']
else:
snake_case__ : List[Any] = files['''module''']
snake_case__ : List[str] = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
snake_case__ : Any = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case__ : List[Any] = prepare_video()
snake_case__ : List[str] = image_processor(__snake_case , return_tensors='''pt''' )
if "finetuned" not in model_name:
snake_case__ : Optional[int] = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
snake_case__ : Any = torch.load(__snake_case )
snake_case__ : Optional[Any] = model(**__snake_case )
snake_case__ : str = outputs.logits
snake_case__ : Dict = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case__ : Tuple = torch.Size([1, 4_00] )
snake_case__ : int = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case__ : Optional[Any] = torch.Size([1, 1_74] )
snake_case__ : Union[str, Any] = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case__ : Optional[int] = torch.Size([1, 14_08, 15_36] )
snake_case__ : int = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case__ : Tuple = torch.Size([1, 14_08, 15_36] )
snake_case__ : Dict = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case__ : Any = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case__ : Tuple = torch.Size([1, 14_08, 15_36] )
snake_case__ : Optional[int] = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case__ : Union[str, Any] = torch.Size([1, 4_00] )
snake_case__ : str = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case__ : List[str] = torch.Size([1, 4_00] )
snake_case__ : List[str] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case__ : List[Any] = torch.Size([1, 4_00] )
snake_case__ : List[Any] = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case__ : Any = torch.Size([1, 4_00] )
snake_case__ : List[str] = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case__ : List[str] = torch.Size([1, 14_08, 15_36] )
snake_case__ : List[str] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case__ : Union[str, Any] = torch.Size([1, 1_74] )
snake_case__ : Optional[Any] = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case__ : str = torch.Size([1, 14_08, 15_36] )
snake_case__ : Optional[Any] = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case__ : List[str] = torch.Size([1, 1_74] )
snake_case__ : Optional[int] = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case__ : int = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__snake_case , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> Any:
__SCREAMING_SNAKE_CASE = data
__SCREAMING_SNAKE_CASE = None
def __repr__( self ) -> str:
return f'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
def __init__( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = None
def __iter__( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(_a ) for item in self] )
def __getitem__( self, _a ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self, _a, _a ) -> None:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
__SCREAMING_SNAKE_CASE = self.head
for _ in range(_a ):
__SCREAMING_SNAKE_CASE = current.next
__SCREAMING_SNAKE_CASE = data
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(len(self ), _a )
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(0, _a )
def __lowerCAmelCase ( self, _a, _a ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
__SCREAMING_SNAKE_CASE = Node(_a )
if self.head is None:
__SCREAMING_SNAKE_CASE = new_node
elif index == 0:
__SCREAMING_SNAKE_CASE = self.head # link new_node to head
__SCREAMING_SNAKE_CASE = new_node
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = new_node
def __lowerCAmelCase ( self ) -> None: # print every node data
print(self )
def __lowerCAmelCase ( self ) -> Any:
return self.delete_nth(0 )
def __lowerCAmelCase ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self, _a = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
__SCREAMING_SNAKE_CASE = self.head # default first node
if index == 0:
__SCREAMING_SNAKE_CASE = self.head.next
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self ) -> bool:
return self.head is None
def __lowerCAmelCase ( self ) -> None:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.head
while current:
# Store the current node's next node.
__SCREAMING_SNAKE_CASE = current.next
# Make the current node's next point backwards
__SCREAMING_SNAKE_CASE = prev
# Make the previous node be the current node
__SCREAMING_SNAKE_CASE = current
# Make the current node the next node (to progress iteration)
__SCREAMING_SNAKE_CASE = next_node
# Return prev in order to put the head at the end
__SCREAMING_SNAKE_CASE = prev
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__SCREAMING_SNAKE_CASE = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__SCREAMING_SNAKE_CASE = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__SCREAMING_SNAKE_CASE = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__SCREAMING_SNAKE_CASE = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
from doctest import testmod
testmod()
__SCREAMING_SNAKE_CASE = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
__SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip()
print("New list:" )
print(__snake_case )
print(f'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
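    # Illustrative examples (an addition, not from the original file):
    # 39 -> 27 -> 14 -> 4 needs 3 multiplicative steps; 199 -> 19 -> 10 -> 1 needs 3 additive steps.
    print(multiplicative_persistence(39))  # 3
    print(additive_persistence(199))  # 3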
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of the almost-equilateral triangles (sides a, a, a +/- 1) with
    integral sides and area whose perimeters do not exceed max_perimeter (Project Euler 94).
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase_ : str = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
UpperCamelCase_ : Any = name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
UpperCamelCase_ : List[Any] = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase_ : Tuple = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
UpperCamelCase_ : Any = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCamelCase_ : Optional[int] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
UpperCamelCase_ : List[str] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
UpperCamelCase_ : int = name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
UpperCamelCase_ : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCamelCase_ : int = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCamelCase_ : str = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCamelCase_ : Optional[int] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCamelCase_ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCamelCase_ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
UpperCamelCase_ : Any = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
UpperCamelCase_ : List[str] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
UpperCamelCase_ : Tuple = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase_ : str = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase_ : int = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def convert_state_dict(orig_state_dict, config):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase_ : int = orig_state_dict.pop(__snake_case )
if "qkv" in key:
UpperCamelCase_ : Optional[Any] = key.split(""".""" )
UpperCamelCase_ : int = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase_ : Dict = config.decoder_hidden_size
UpperCamelCase_ : Optional[Any] = """decoder.decoder_layers."""
if "weight" in key:
UpperCamelCase_ : Any = val[:dim, :]
UpperCamelCase_ : Union[str, Any] = val[dim : dim * 2, :]
UpperCamelCase_ : Dict = val[-dim:, :]
elif "bias" in key:
UpperCamelCase_ : Optional[int] = val[:dim]
UpperCamelCase_ : int = val[dim : dim * 2]
UpperCamelCase_ : Optional[Any] = val[-dim:]
else:
UpperCamelCase_ : Dict = config.hidden_size
UpperCamelCase_ : Tuple = """vit.encoder.layer."""
if "weight" in key:
UpperCamelCase_ : Any = val[:dim, :]
UpperCamelCase_ : Union[str, Any] = val[dim : dim * 2, :]
UpperCamelCase_ : Dict = val[-dim:, :]
elif "bias" in key:
UpperCamelCase_ : Any = val[:dim]
UpperCamelCase_ : Dict = val[dim : dim * 2]
UpperCamelCase_ : Tuple = val[-dim:]
else:
UpperCamelCase_ : str = val
return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
"""simple docstring"""
UpperCamelCase_ : Dict = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase_ : List[str] = 1_024
UpperCamelCase_ : List[str] = 4_096
UpperCamelCase_ : int = 24
UpperCamelCase_ : List[Any] = 16
elif "huge" in checkpoint_url:
UpperCamelCase_ : Optional[int] = 14
UpperCamelCase_ : List[Any] = 1_280
UpperCamelCase_ : int = 5_120
UpperCamelCase_ : Dict = 32
UpperCamelCase_ : Any = 16
UpperCamelCase_ : int = ViTMAEForPreTraining(__snake_case )
UpperCamelCase_ : int = torch.hub.load_state_dict_from_url(__snake_case , map_location="""cpu""" )["""model"""]
UpperCamelCase_ : List[Any] = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase_ : int = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
UpperCamelCase_ : str = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
UpperCamelCase_ : Union[str, Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
UpperCamelCase_ : Optional[Any] = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase_ : Union[str, Any] = image_processor(images=__snake_case , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
UpperCamelCase_ : List[str] = model(**__snake_case )
UpperCamelCase_ : Optional[Any] = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase_ : Union[str, Any] = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
UpperCamelCase_ : List[str] = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
UpperCamelCase_ : List[str] = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__snake_case )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =None
# Automatically constructed
SCREAMING_SNAKE_CASE__ ="dict"
SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Optional[int]:
return self.pa_type
def __lowerCAmelCase ( self, _a ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_a, _a ):
return {"bytes": None, "path": value}
elif isinstance(_a, _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__SCREAMING_SNAKE_CASE = token_per_repo_id or {}
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
__SCREAMING_SNAKE_CASE = array.T
if self.mono:
__SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
__SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("bytes" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("path" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
return array_cast(_a, self.pa_type )
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a, "rb" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
return bytes_
__SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
__SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(_a, self.pa_type )
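# Hedged usage sketch (an addition, not part of the original file); the dataset name is
# an assumption — any dataset with an audio column works the same way.
#
#     from datasets import Audio, load_dataset
#
#     ds = load_dataset("PolyAI/minds14", "en-US", split="train")
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#     sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}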
"""Feature extractor class for EnCodec."""

from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Constructs an EnCodec feature extractor that prepares raw audio for the model."""

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the overlap on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
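# Hedged usage sketch (an addition, not part of the original file); "facebook/encodec_24khz"
# is the public checkpoint this extractor targets, but treat the name as an assumption.
#
#     import numpy as np
#     from transformers import EncodecFeatureExtractor
#
#     feature_extractor = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
#     raw_audio = np.random.randn(24_000).astype(np.float32)  # 1 second of mono audio
#     inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
#     print(inputs["input_values"].shape)  # (batch, channels, samples)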
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self, **_a ) -> str:
__SCREAMING_SNAKE_CASE = {"num_train_timesteps": 10_00}
config.update(**_a )
return config
def __lowerCAmelCase ( self, _a=0, **_a ) -> List[Any]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self, _a=0, **_a ) -> int:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
if time_step is None:
__SCREAMING_SNAKE_CASE = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
__SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = new_scheduler.step(_a, _a, _a, **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self, **_a ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a ).prev_sample
return sample
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
__SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps", _a )
for scheduler_class in self.scheduler_classes:
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE = self.dummy_sample
__SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_a, "set_timesteps" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a, "set_timesteps" ):
__SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__SCREAMING_SNAKE_CASE = dummy_past_residuals[:]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
__SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, **_a ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def __lowerCAmelCase ( self ) -> str:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_a, time_step=_a )
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.full_loop()
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 693 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__snake_case : int = logging.getLogger(__name__)
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
def lowerCamelCase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.layer[current_layer](_a , _a , head_mask[current_layer] )
UpperCAmelCase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , __SCREAMING_SNAKE_CASE , )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , UpperCamelCase__ ) -> Any:
"""simple docstring"""
super().__init__(_a )
UpperCAmelCase_ = BertEncoderWithPabee(_a )
self.init_weights()
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = threshold
def lowerCamelCase_ ( self , UpperCamelCase__ ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = patience
def lowerCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def lowerCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = self.inference_layers_num / self.inference_instances_num
UpperCAmelCase_ = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def lowerCamelCase_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCAmelCase_ = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCAmelCase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase_ = torch.ones(_a , device=_a )
if token_type_ids is None:
UpperCAmelCase_ = torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase_ = self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = encoder_hidden_states.size()
UpperCAmelCase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCAmelCase_ = torch.ones(_a , device=_a )
UpperCAmelCase_ = self.invert_attention_mask(_a )
else:
UpperCAmelCase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase_ = self.get_head_mask(_a , self.config.num_hidden_layers )
UpperCAmelCase_ = self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
UpperCAmelCase_ = embedding_output
if self.training:
UpperCAmelCase_ = []
for i in range(self.config.num_hidden_layers ):
UpperCAmelCase_ = self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
UpperCAmelCase_ = self.pooler(_a )
UpperCAmelCase_ = output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
UpperCAmelCase_ = self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
UpperCAmelCase_ = self.pooler(encoder_outputs[0] )
UpperCAmelCase_ = [output_layers[self.config.num_hidden_layers - 1](_a )]
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCAmelCase_ = self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
UpperCAmelCase_ = self.pooler(_a )
UpperCAmelCase_ = output_layers[i](_a )
if regression:
UpperCAmelCase_ = logits.detach()
if patient_result is not None:
UpperCAmelCase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCAmelCase_ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = logits
if patient_counter == self.patience:
break
UpperCAmelCase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , __SCREAMING_SNAKE_CASE , )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , UpperCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
UpperCAmelCase_ = config.num_labels
UpperCAmelCase_ = BertModelWithPabee(_a )
UpperCAmelCase_ = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def lowerCamelCase_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCAmelCase_ = (logits[-1],)
if labels is not None:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ = MSELoss()
UpperCAmelCase_ = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ = CrossEntropyLoss()
UpperCAmelCase_ = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCAmelCase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCAmelCase_ = (total_loss / total_weights,) + outputs
return outputs
| 660 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __snake_case :List[Any] , __snake_case :Union[str, Any]=1000 ) -> int:
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__SCREAMING_SNAKE_CASE = n - 1
__SCREAMING_SNAKE_CASE = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
__SCREAMING_SNAKE_CASE = 0
while count < prec:
__SCREAMING_SNAKE_CASE = random.randint(2 , n - 1 )
__SCREAMING_SNAKE_CASE = bin_exp_mod(__snake_case , __snake_case , __snake_case )
if b != 1:
__SCREAMING_SNAKE_CASE = True
for _ in range(__snake_case ):
if b == n - 1:
__SCREAMING_SNAKE_CASE = False
break
__SCREAMING_SNAKE_CASE = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_snake_case : int = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 693 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
snake_case : Optional[Any] = """dandelin/vilt-b32-finetuned-vqa"""
snake_case : List[Any] = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
snake_case : Optional[int] = """image_qa"""
snake_case : str = AutoProcessor
snake_case : Tuple = AutoModelForVisualQuestionAnswering
snake_case : int = ["""image""", """text"""]
snake_case : Tuple = ["""text"""]
def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
requires_backends(self , ["""vision"""] )
super().__init__(*_a , **_a )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
return self.pre_processor(_a , _a , return_tensors="""pt""" )
def snake_case_ (self , lowerCAmelCase__ ):
with torch.no_grad():
return self.model(**_a ).logits
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[Any] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 414 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _A ( __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int , __snake_case :int ) -> np.ndarray:
"""simple docstring"""
if (ksize % 2) == 0:
__SCREAMING_SNAKE_CASE = ksize + 1
__SCREAMING_SNAKE_CASE = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
__SCREAMING_SNAKE_CASE = x - ksize // 2
__SCREAMING_SNAKE_CASE = y - ksize // 2
# degree to radiant
__SCREAMING_SNAKE_CASE = theta / 180 * np.pi
__SCREAMING_SNAKE_CASE = np.cos(_theta )
__SCREAMING_SNAKE_CASE = np.sin(_theta )
# get kernel x
__SCREAMING_SNAKE_CASE = cos_theta * px + sin_theta * py
# get kernel y
__SCREAMING_SNAKE_CASE = -sin_theta * px + cos_theta * py
# fill kernel
__SCREAMING_SNAKE_CASE = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case : Union[str, Any] = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case : List[str] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case : int = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
_snake_case : List[str] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case : Optional[Any] = out / out.max() * 2_55
_snake_case : Union[str, Any] = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 693 | 0 |
lowerCAmelCase__ : Any ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 101 |
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
__SCREAMING_SNAKE_CASE = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(__snake_case )
else:
__SCREAMING_SNAKE_CASE = sylvester(number - 1 )
__SCREAMING_SNAKE_CASE = num - 1
__SCREAMING_SNAKE_CASE = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 693 | 0 |
import argparse
import json
from tqdm import tqdm
def __SCREAMING_SNAKE_CASE ():
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=__snake_case , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=__snake_case , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=__snake_case , help='''where to store parsed gold_data_path file''' , )
snake_case_ = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
snake_case_ = json.load(__snake_case )
for dpr_record in tqdm(__snake_case ):
snake_case_ = dpr_record['''question''']
snake_case_ = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(__snake_case ) + '''\n''' )
if __name__ == "__main__":
main() | 39 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCAmelCase ( *_a, **_a ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
[
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
{"score": 0.333, "label": ANY(_a )},
],
], )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE = image_classifier(_a, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
__SCREAMING_SNAKE_CASE = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 693 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227 |
from __future__ import annotations
import math
def _A ( __snake_case :int , __snake_case :int , __snake_case :bool , __snake_case :list[int] , __snake_case :float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__SCREAMING_SNAKE_CASE = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 0 |
def __a ( __lowerCAmelCase ) -> Dict: # noqa: E741
SCREAMING_SNAKE_CASE : str = len(__snake_case )
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Any = [0] * n
SCREAMING_SNAKE_CASE : int = [False] * n
SCREAMING_SNAKE_CASE : List[str] = [False] * n
def dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if parent == root:
out_edge_count += 1
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
SCREAMING_SNAKE_CASE : Any = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
SCREAMING_SNAKE_CASE : Dict = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
SCREAMING_SNAKE_CASE : Tuple = True
# AP found via cycle
if at == low[to]:
SCREAMING_SNAKE_CASE : Dict = True
else:
SCREAMING_SNAKE_CASE : Optional[int] = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Dict = dfs(__snake_case , __snake_case , -1 , __snake_case )
SCREAMING_SNAKE_CASE : List[Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
_lowerCamelCase : List[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 352 |
def _A ( __snake_case :bytes ) -> str:
"""simple docstring"""
return "".join([hex(__snake_case )[2:].zfill(2 ).upper() for byte in list(__snake_case )] )
def _A ( __snake_case :str ) -> bytes:
"""simple docstring"""
if (len(__snake_case ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__snake_case ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(__snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = IFImgaImgSuperResolutionPipeline
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __magic_name__ ( self : str ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __magic_name__ ( self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : int=0 ):
'''simple docstring'''
if str(_a ).startswith('''mps''' ):
snake_case__ : List[str] = torch.manual_seed(_a )
else:
snake_case__ : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_a ) ).to(_a )
snake_case__ : str = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_a ) ).to(_a )
snake_case__ : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __magic_name__ ( self : int ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __magic_name__ ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __magic_name__ ( self : str ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
self._test_save_load_local()
def __magic_name__ ( self : str ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 347 |
from functools import lru_cache
def _A ( __snake_case :int ) -> set:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__snake_case )
if n > 1:
factors.add(__snake_case )
return factors
@lru_cache
def _A ( __snake_case :int ) -> int:
"""simple docstring"""
return len(unique_prime_factors(__snake_case ) )
def _A ( __snake_case :list ) -> bool:
"""simple docstring"""
return len(set(__snake_case ) ) in (0, 1)
def _A ( __snake_case :int ) -> list:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 2
while True:
# Increment each value of a generated range
__SCREAMING_SNAKE_CASE = [base + i for i in range(__snake_case )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
__SCREAMING_SNAKE_CASE = [upf_len(__snake_case ) for x in group]
checker.append(__snake_case )
# If all numbers in the list are equal, return the group variable.
if equality(__snake_case ):
return group
# Increment our base variable by 1
base += 1
def _A ( __snake_case :int = 4 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = run(__snake_case )
return results[0] if len(__snake_case ) else None
if __name__ == "__main__":
print(solution())
| 693 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_a : Any = 250_004
_a : str = 250_020
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : Dict = MBartaaTokenizer
__lowerCAmelCase : int = MBartaaTokenizerFast
__lowerCAmelCase : Any = True
__lowerCAmelCase : Any = True
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = MBartaaTokenizer(_a , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = '<s>'
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_a ) , 1_0_5_4 )
def snake_case_ ( self ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = MBartaaTokenizer(_a , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=_a )
UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def snake_case_ ( self ) -> Any:
"""simple docstring"""
# fmt: off
UpperCAmelCase = {'input_ids': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def snake_case_ ( self ) -> int:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCAmelCase = self.tokenizer_class.from_pretrained(_a , **_a )
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(_a )
UpperCAmelCase = tokenizer_p.save_pretrained(_a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_a , _a )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(_a )
UpperCAmelCase = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a , _a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_a )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(_a , legacy_format=_a )
UpperCAmelCase = tokenizer_p.save_pretrained(_a )
# Checks it save with the same files
self.assertSequenceEqual(_a , _a )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(_a )
UpperCAmelCase = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a , _a ) )
shutil.rmtree(_a )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = tokenizer_r.save_pretrained(_a , legacy_format=_a )
UpperCAmelCase = tokenizer_p.save_pretrained(_a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase = tokenizer_r.from_pretrained(_a )
UpperCAmelCase = tokenizer_p.from_pretrained(_a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_a , _a ) )
shutil.rmtree(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase : str = "facebook/mbart-large-50-one-to-many-mmt"
__lowerCAmelCase : str = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__lowerCAmelCase : List[str] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__lowerCAmelCase : Optional[Any] = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def snake_case_ ( cls ) -> Any:
"""simple docstring"""
UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
UpperCAmelCase = 1
return cls
def snake_case_ ( self ) -> int:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 2_5_0_0_3_8 )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _a )
def snake_case_ ( self ) -> int:
"""simple docstring"""
self.assertIn(_a , self.tokenizer.all_special_ids )
UpperCAmelCase = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
UpperCAmelCase = self.tokenizer.decode(_a , skip_special_tokens=_a )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
self.assertNotIn(self.tokenizer.eos_token , _a )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , _a )
UpperCAmelCase = 1_0
UpperCAmelCase = self.tokenizer(_a , max_length=_a , truncation=_a ).input_ids[0]
self.assertEqual(ids[0] , _a )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_a ) , _a )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_a )
UpperCAmelCase = MBartaaTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _a )
@require_torch
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors='pt' )
UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def snake_case_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_a , truncation=_a , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
UpperCAmelCase = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _a )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(self.src_text , padding=_a , truncation=_a , max_length=3 , return_tensors='pt' )
UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=_a , truncation=_a , max_length=1_0 , return_tensors='pt' )
UpperCAmelCase = targets['input_ids']
UpperCAmelCase = shift_tokens_right(_a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def snake_case_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(_a ) , {
# en_XX, A, test, EOS
'input_ids': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_0_0_0_1,
} , )
| 447 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 693 | 0 |
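# --- Added illustration (not part of the original row) ---
# A minimal, self-contained sketch of the qkv-splitting trick used by
# convert_state_dict above: a fused projection of shape (3 * dim, dim) is
# sliced into separate query/key/value matrices. `dim = 4` is a toy size
# chosen purely for the example.
import torch

dim = 4
fused_qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query = fused_qkv[:dim, :]
key = fused_qkv[dim : dim * 2, :]
value = fused_qkv[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)
# the three slices are disjoint and cover the fused matrix exactly
assert torch.equal(torch.cat([query, key, value], dim=0), fused_qkv)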
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Tuple = {}
class _a ( __SCREAMING_SNAKE_CASE ):
A = '''llama'''
A = ['''past_key_values''']
    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self ) -> None:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type", None )
        rope_scaling_factor = self.rope_scaling.get("factor", None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 556 |
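# --- Added illustration ---
# A standalone sketch mirroring the `_rope_scaling_validation` logic above,
# handy for experimenting outside the config class. The example payload is
# invented for illustration.
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with `type` and `factor`, got {rope_scaling}")
    rope_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if rope_type not in ["linear", "dynamic"]:
        raise ValueError(f"`rope_scaling`'s type must be 'linear' or 'dynamic', got {rope_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently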
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    def __init__( self, *args, **kwargs ) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs )
| 693 | 0 |
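# --- Added illustration ---
# The file above keeps a deprecated class name alive as a thin warning alias.
# A generic sketch of the same pattern, with invented class names:
import warnings


class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale


class OldProcessor(NewProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)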
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase =get_tests_dir("fixtures")
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_ : int = mock.Mock()
UpperCamelCase_ : Dict = 5_00
UpperCamelCase_ : str = {}
UpperCamelCase_ : str = HTTPError
UpperCamelCase_ : Any = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head:
UpperCamelCase_ : str = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
def _UpperCAmelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class A ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCAmelCase ( cls ):
UpperCamelCase_ : List[str] = TOKEN
HfFolder.save_token(_a )
@classmethod
def _UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
UpperCamelCase_ : Tuple = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id="""test-feature-extractor""" , push_to_hub=_a , use_auth_token=self._token )
UpperCamelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
UpperCamelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=_a , use_auth_token=self._token )
UpperCamelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def _UpperCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
UpperCamelCase_ : Tuple = CustomFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
UpperCamelCase_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 208 |
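# --- Added illustration ---
# The tests above patch `requests.Session.request` to simulate a server
# outage. The same mocking technique in a minimal standalone sketch (no
# network traffic happens because the request is patched out):
import unittest.mock as mock

import requests


def fetch_status(url: str) -> int:
    return requests.Session().request("HEAD", url).status_code


response_mock = mock.Mock()
response_mock.status_code = 500
with mock.patch("requests.Session.request", return_value=response_mock) as mocked:
    assert fetch_status("https://example.com") == 500
    mocked.assert_called()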
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum all amicable numbers below the limit (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 693 | 0 |
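# --- Added sanity checks (assume the functions from the snippet above) ---
# 220 and 284 form the classic amicable pair, so each maps to the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(300) == 504  # 220 + 284 are the only amicable numbers below 300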
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of all almost-equilateral Heronian triangles (sides a, a, a +/- 1
    with integer area) whose perimeter does not exceed max_perimeter.

    >>> solution(20)
    16
    >>> solution(1000)
    984
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 92 |
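# --- Added cross-check (assumes `solution` from the snippet above) ---
# A slow brute force using Heron's formula on triangles (a, a, a +/- 1);
# only practical for small limits, written purely to validate the recurrence.
from math import isqrt


def brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if perimeter > max_perimeter:
                continue
            # Heron: 16 * area^2 = p * (p - 2a)^2 * (p - 2c) when b == a
            sq = perimeter * (perimeter - 2 * a) ** 2 * (perimeter - 2 * c)
            root = isqrt(sq)
            if sq > 0 and root * root == sq and root % 4 == 0:
                total += perimeter
    return total


assert brute_force(1000) == solution(1000)  # both give 984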
def molarity_to_normality(nfactor: int , moles: float , volume: float ) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume ) * nfactor )


def moles_to_pressure(volume: float , moles: float , temperature: float ) -> float:
    """Ideal gas law (PV = nRT, R = 0.0821 L*atm/(mol*K)) solved for pressure.

    >>> moles_to_pressure(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (volume) ) )


def moles_to_volume(pressure: float , moles: float , temperature: float ) -> float:
    """Ideal gas law solved for volume."""
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )


def pressure_and_volume_to_temperature(pressure: float , moles: float , volume: float ) -> float:
    """Ideal gas law solved for temperature.

    >>> pressure_and_volume_to_temperature(0.82, 1, 2)
    20
    """
    return round(float((pressure * volume) / (0.0821 * moles) ) )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 693 | 0 |
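# --- Added worked example ---
# An ideal-gas-law check (PV = nRT with R = 0.0821 L*atm/(mol*K)):
# 1 mol at 273 K in 22.4 L exerts roughly 1 atm.
moles, temperature, volume = 1.0, 273.0, 22.4
pressure = (moles * 0.0821 * temperature) / volume
assert abs(pressure - 1.0) < 0.01  # approx. 1.0006 atm
# and the round trip back to temperature recovers ~273 K
assert abs((pressure * volume) / (0.0821 * moles) - 273.0) < 1e-6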
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : List[str] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 660 |
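# --- Added illustration ---
# The import layout above defers heavy imports until first attribute access.
# A tiny standalone sketch of the same idea using module-level __getattr__
# (PEP 562); the mapping below is invented for the example.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute -> module that provides it


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")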
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = encoder_seq_length
__SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
__SCREAMING_SNAKE_CASE = self.decoder_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = decoder_layers
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig.from_pretrained("google/umt5-base" )
def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
if attention_mask is None:
__SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = config.num_attention_heads
__SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
return config, input_dict
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
__SCREAMING_SNAKE_CASE = result.last_hidden_state
__SCREAMING_SNAKE_CASE = result.past_key_values
__SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
__SCREAMING_SNAKE_CASE = model(_a )
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )
def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
__SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ =[0.8, 0.9]
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = config_and_inputs[0]
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
head_masking = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
}
for attn_name, (name, mask) in zip(attention_names, head_masking.items() ):
__SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_heads, device=_a )
__SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, )
# We check the state of decoder_attentions and cross_attentions just from the last step
__SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
| 693 | 0 |
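# --- Added illustration ---
# The head-masking test above zeroes attention weights per layer/head. A
# minimal sketch of what such a mask looks like and how it knocks out one
# head's scores; all sizes are toy values.
import torch

num_layers, num_heads = 2, 4
head_mask = torch.ones(num_layers, num_heads)
head_mask[0, 1] = 0.0  # disable head 1 of layer 0

attn_scores = torch.rand(num_heads, 5, 5)  # (heads, query, key) for layer 0
masked = attn_scores * head_mask[0].view(num_heads, 1, 1)
assert torch.all(masked[1] == 0)  # the disabled head contributes nothing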
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __A ( lowerCAmelCase_ , lowerCAmelCase_=None ):
_UpperCAmelCase : Dict = None
if token is not None:
_UpperCAmelCase : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
_UpperCAmelCase : List[str] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
_UpperCAmelCase : Dict = requests.get(__snake_case , headers=__snake_case ).json()
_UpperCAmelCase : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase : List[Any] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__snake_case ):
_UpperCAmelCase : str = requests.get(url + f"&page={i + 2}" , headers=__snake_case ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __A ( lowerCAmelCase_ , lowerCAmelCase_=None ):
_UpperCAmelCase : Union[str, Any] = None
if token is not None:
_UpperCAmelCase : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
_UpperCAmelCase : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
_UpperCAmelCase : Optional[int] = requests.get(__snake_case , headers=__snake_case ).json()
_UpperCAmelCase : int = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase : Any = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__snake_case ):
_UpperCAmelCase : Dict = requests.get(url + f"&page={i + 2}" , headers=__snake_case ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[Any] = None
if token is not None:
_UpperCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
_UpperCAmelCase : str = requests.get(__snake_case , headers=__snake_case , allow_redirects=__snake_case )
_UpperCAmelCase : str = result.headers["""Location"""]
_UpperCAmelCase : Dict = requests.get(__snake_case , allow_redirects=__snake_case )
_UpperCAmelCase : Union[str, Any] = os.path.join(__snake_case , f"{artifact_name}.zip" )
with open(__snake_case , """wb""" ) as fp:
fp.write(response.content )
def __A ( lowerCAmelCase_ , lowerCAmelCase_=None ):
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Optional[int] = None
with zipfile.ZipFile(__snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(__snake_case ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__snake_case ) as f:
for line in f:
_UpperCAmelCase : Dict = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase : List[str] = line[: line.index(""": """ )]
_UpperCAmelCase : int = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase : Optional[Any] = line[len("""FAILED """ ) :]
failed_tests.append(__snake_case )
elif filename == "job_name.txt":
_UpperCAmelCase : Union[str, Any] = line
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__snake_case )} for `errors` "
f"and {len(__snake_case )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
_UpperCAmelCase : str = None
if job_name and job_links:
_UpperCAmelCase : Any = job_links.get(__snake_case , __snake_case )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase : Any = [x + [y] + [job_link] for x, y in zip(__snake_case , __snake_case )]
return result
def __A ( lowerCAmelCase_ , lowerCAmelCase_=None ):
_UpperCAmelCase : str = []
_UpperCAmelCase : Optional[int] = [os.path.join(__snake_case , __snake_case ) for p in os.listdir(__snake_case ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__snake_case , job_links=__snake_case ) )
return errors
def __A ( lowerCAmelCase_ , lowerCAmelCase_=None ):
_UpperCAmelCase : Optional[Any] = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase : Union[str, Any] = counter.most_common()
_UpperCAmelCase : List[Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase : List[str] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def get_model(test):
    """Extract the model name from a test path like tests/models/<model>/test_x.py."""
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        model = test.split("/" )[2]
    else:
        model = None
    return model
def __A ( lowerCAmelCase_ , lowerCAmelCase_=None ):
_UpperCAmelCase : Union[str, Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase : Optional[int] = [x for x in logs if x[2] is not None]
_UpperCAmelCase : Tuple = {x[2] for x in logs}
_UpperCAmelCase : List[str] = {}
for test in tests:
_UpperCAmelCase : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase : List[Any] = counter.most_common()
_UpperCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase : List[str] = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase : Dict = {"""count""": n_errors, """errors""": error_counts}
r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
lowerCAmelCase_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase_ : Tuple = get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase_ : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase_ : int = k.find(''' / ''')
lowerCAmelCase_ : str = k[index + len(''' / ''') :]
lowerCAmelCase_ : Optional[Any] = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase_ : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase_ : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase_ : List[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase_ : int = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase_ : Optional[Any] = reduce_by_error(errors)
lowerCAmelCase_ : List[Any] = reduce_by_model(errors)
lowerCAmelCase_ : str = make_github_table(reduced_by_error)
lowerCAmelCase_ : List[str] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 414 |
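# --- Added illustration ---
# The report script above groups failures with collections.Counter and sorts
# the resulting dict by count. The same core trick in isolation, on made-up
# error strings:
from collections import Counter

logs = ["OOM", "Timeout", "OOM", "OOM", "AssertionError", "Timeout"]
counter = Counter(logs)
by_count = dict(sorted(counter.items(), key=lambda item: item[1], reverse=True))
assert list(by_count) == ["OOM", "Timeout", "AssertionError"]
assert by_count["OOM"] == 3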
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel , ckpt_dir: str , model_name: str ) -> None:
    """Convert a PyTorch BertModel's weights into a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()

    def to_tf_var_name(name: str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f'''bert/{name}'''

    def create_tf_var(tensor: np.ndarray , name: str , session: tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f'''Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )


def main(raw_args=None ) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 693 | 0 |
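# --- Added illustration ---
# PyTorch stores Linear weights as (out_features, in_features) while TF dense
# kernels are (in_features, out_features), hence the `.T` above. A toy check:
import numpy as np
import torch

linear = torch.nn.Linear(in_features=3, out_features=5)
pt_weight = linear.weight.detach().numpy()
assert pt_weight.shape == (5, 3)
tf_kernel = pt_weight.T  # what gets written to the TF checkpoint
assert tf_kernel.shape == (3, 5)
assert np.allclose(tf_kernel.T, pt_weight)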
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ : str =logging.get_logger(__name__)
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase = ["""pixel_values"""]
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
super().__init__(**_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = size if size is not None else {'shortest_edge': 3_8_4}
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(_a , default_to_square=_a )
SCREAMING_SNAKE_CASE_ : Tuple = do_resize
SCREAMING_SNAKE_CASE_ : int = size
# Default value set here for backwards compatibility where the value in config is None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
SCREAMING_SNAKE_CASE_ : str = resample
SCREAMING_SNAKE_CASE_ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE_ : List[str] = rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BICUBIC , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_ : List[Any] = size['shortest_edge']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE_ : List[Any] = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE_ : List[str] = get_resize_output_image_size(_a , size=_a , default_to_square=_a )
SCREAMING_SNAKE_CASE_ : Dict = resize(image=_a , size=_a , resample=_a , data_format=_a , **_a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_a , size=(shortest_edge, shortest_edge) , data_format=_a , **_a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_a , size=(shortest_edge, shortest_edge) , resample=_a , data_format=_a , **_a )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ):
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Dict = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE_ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(_a , default_to_square=_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_numpy_array(_a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Tuple = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : str = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple = [to_channel_dimension_format(_a , _a ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': images}
return BatchFeature(data=_a , tensor_type=_a )
| 101 |
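# --- Added worked numbers ---
# For inputs smaller than 384, the processor above first resizes the shortest
# edge to shortest_edge / crop_pct and then center-crops back down. The
# arithmetic in isolation, using the defaults from the class above:
shortest_edge = 224
crop_pct = 224 / 256
resize_shortest_edge = int(shortest_edge / crop_pct)
assert resize_shortest_edge == 256  # resize to 256, then center-crop 224x224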
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_snake_case : str = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =["""input_values""", """padding_mask"""]
def __init__( self, _a = 1, _a = 2_40_00, _a = 0.0, _a = None, _a = None, **_a, ) -> str:
super().__init__(feature_size=_a, sampling_rate=_a, padding_value=_a, **_a )
__SCREAMING_SNAKE_CASE = chunk_length_s
__SCREAMING_SNAKE_CASE = overlap
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self, _a, _a = None, _a = False, _a = None, _a = None, _a = None, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = bool(
isinstance(_a, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_a, np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa )
elif isinstance(_a, np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(_a ).T]
# verify inputs are valid
for idx, example in enumerate(_a ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE = "max_length"
else:
__SCREAMING_SNAKE_CASE = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE = self.pad(
_a, max_length=_a, truncation=_a, padding=_a, return_attention_mask=_a, )
if padding:
__SCREAMING_SNAKE_CASE = padded_inputs.pop("attention_mask" )
__SCREAMING_SNAKE_CASE = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(_a )
return padded_inputs
| 693 | 0 |
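# --- Added worked numbers ---
# The feature extractor above derives its hop size from the overlap ratio:
# chunk_stride = max(1, int((1 - overlap) * chunk_length)). Worked example,
# assuming a 24 kHz rate and 1-second chunks (values invented for illustration):
sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.25
chunk_length = int(chunk_length_s * sampling_rate)            # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))
assert chunk_stride == 18_000  # windows advance by 18000 samples (25% overlap)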
def is_isogram(string: str ) -> bool:
    """
    Return True if no letter repeats in the given string.

    >>> is_isogram("Uncopyrightable")
    True
    >>> is_isogram("allowance")
    False
    """
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )


if __name__ == "__main__":
    input_str = input('Enter a string ' ).strip()
    isogram = is_isogram(input_str )
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram." )
| 39 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__( self, unet, scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
@torch.no_grad()
    def __call__( self, batch_size = 1, num_inference_steps = 20_00, generator = None, output_type = "pil", return_dict = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample, sigma_t ).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator ).prev_sample
            # prediction step
            model_output = model(sample, sigma_t ).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1 )
        sample = sample.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 693 | 0 |
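# --- Added illustration ---
# The pipeline above alternates corrector and predictor updates while
# integrating a reverse SDE. A toy Euler-Maruyama integrator for
# dx = -x dt + sigma dW, showing the same step/loop shape with none of the
# model machinery (all constants invented for the example):
import torch

torch.manual_seed(0)
x = torch.randn(4)
dt, sigma = 0.01, 0.1
for _ in range(100):
    drift = -x                              # stand-in for the model output
    noise = sigma * torch.randn_like(x) * dt ** 0.5
    x = x + drift * dt + noise              # predictor-style update
assert torch.isfinite(x).all()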
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class a__ ( __SCREAMING_SNAKE_CASE ):
snake_case__ = '''pix2struct_text_model'''
snake_case__ = ['''past_key_values''']
snake_case__ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : List[Any] ,a__ : Tuple=5_0244 ,a__ : int=768 ,a__ : List[str]=64 ,a__ : int=2048 ,a__ : Optional[int]=12 ,a__ : Union[str, Any]=12 ,a__ : Any=32 ,a__ : Union[str, Any]=128 ,a__ : Optional[int]=0.1 ,a__ : Optional[Any]=1E-6 ,a__ : str=1.0 ,a__ : List[str]="gelu_new" ,a__ : Union[str, Any]=0 ,a__ : List[str]=False ,a__ : Optional[Any]=0 ,a__ : Optional[Any]=1 ,a__ : str=False ,a__ : str=True ,**a__ : Tuple ,) -> str:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = vocab_size
_lowerCAmelCase:str = hidden_size
_lowerCAmelCase:List[str] = d_kv
_lowerCAmelCase:List[str] = d_ff
_lowerCAmelCase:Optional[int] = num_layers
_lowerCAmelCase:Union[str, Any] = num_heads
_lowerCAmelCase:Dict = relative_attention_num_buckets
_lowerCAmelCase:Union[str, Any] = relative_attention_max_distance
_lowerCAmelCase:List[Any] = dropout_rate
_lowerCAmelCase:Optional[int] = layer_norm_epsilon
_lowerCAmelCase:Tuple = initializer_factor
_lowerCAmelCase:Any = use_cache
_lowerCAmelCase:str = eos_token_id
_lowerCAmelCase:Union[str, Any] = decoder_start_token_id
# for backwards compatibility
_lowerCAmelCase:Optional[int] = dense_act_fn
super().__init__(
pad_token_id=_a ,eos_token_id=_a ,decoder_start_token_id=_a ,tie_word_embeddings=_a ,is_decoder=_a ,**_a ,)
@classmethod
def __UpperCamelCase ( cls : int ,a__ : List[str] ,**a__ : int) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_a)
_lowerCAmelCase , _lowerCAmelCase:Union[str, Any] = cls.get_config_dict(_a ,**_a)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''') == "pix2struct":
_lowerCAmelCase:List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(_a ,**_a)
class PixaStructVisionConfig(PretrainedConfig):
snake_case__ = '''pix2struct_vision_model'''
def __init__( self : List[Any] ,a__ : List[str]=768 ,a__ : Union[str, Any]=768 ,a__ : Optional[int]=2048 ,a__ : str=64 ,a__ : Optional[int]=12 ,a__ : int=12 ,a__ : List[str]="gelu_new" ,a__ : Optional[int]=1E-6 ,a__ : Dict=0.0 ,a__ : Optional[int]=0.0 ,a__ : Any=1E-10 ,a__ : Optional[int]=1.0 ,a__ : Any=4096 ,a__ : Tuple=32 ,a__ : List[Any]=128 ,**a__ : Union[str, Any] ,) -> Optional[int]:
"""simple docstring"""
super().__init__(**_a)
_lowerCAmelCase:Dict = hidden_size
_lowerCAmelCase:Any = patch_embed_hidden_size
_lowerCAmelCase:Optional[int] = d_ff
_lowerCAmelCase:List[Any] = dropout_rate
_lowerCAmelCase:List[Any] = num_hidden_layers
_lowerCAmelCase:List[str] = num_attention_heads
_lowerCAmelCase:str = initializer_range
_lowerCAmelCase:Optional[Any] = initializer_factor
_lowerCAmelCase:Optional[int] = attention_dropout
_lowerCAmelCase:Optional[Any] = layer_norm_eps
_lowerCAmelCase:List[Any] = dense_act_fn
_lowerCAmelCase:int = seq_len
_lowerCAmelCase:Union[str, Any] = relative_attention_num_buckets
_lowerCAmelCase:Union[str, Any] = relative_attention_max_distance
_lowerCAmelCase:Optional[int] = d_kv
@classmethod
def __UpperCamelCase ( cls : int ,a__ : Union[str, Any] ,**a__ : str) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_a)
_lowerCAmelCase , _lowerCAmelCase:int = cls.get_config_dict(_a ,**_a)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''') == "pix2struct":
_lowerCAmelCase:int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(_a ,**_a)
class PixaStructConfig(PretrainedConfig):
snake_case__ = '''pix2struct'''
snake_case__ = True
def __init__( self : List[Any] ,a__ : Optional[int]=None ,a__ : Tuple=None ,a__ : Dict=1.0 ,a__ : Optional[Any]=0.02 ,a__ : Union[str, Any]=False ,a__ : str=False ,a__ : Any=True ,**a__ : Any ,) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=_a ,is_encoder_decoder=_a ,**_a)
if text_config is None:
_lowerCAmelCase:Tuple = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''')
if vision_config is None:
_lowerCAmelCase:Tuple = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''')
_lowerCAmelCase:List[Any] = PixaStructTextConfig(**_a)
_lowerCAmelCase:Any = PixaStructVisionConfig(**_a)
_lowerCAmelCase:Tuple = self.text_config.decoder_start_token_id
_lowerCAmelCase:int = self.text_config.pad_token_id
_lowerCAmelCase:Tuple = self.text_config.eos_token_id
_lowerCAmelCase:List[str] = initializer_factor
_lowerCAmelCase:Dict = initializer_range
_lowerCAmelCase:Optional[int] = self.initializer_range
_lowerCAmelCase:List[Any] = self.initializer_range
_lowerCAmelCase:int = is_vqa
@classmethod
def __UpperCamelCase ( cls : List[str] ,a__ : Optional[int] ,a__ : str ,**a__ : Optional[int]) -> Any:
"""simple docstring"""
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_a)
def __UpperCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:int = copy.deepcopy(self.__dict__)
_lowerCAmelCase:Tuple = self.text_config.to_dict()
_lowerCAmelCase:int = self.vision_config.to_dict()
_lowerCAmelCase:Any = self.__class__.model_type
return output
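# Composition sketch for the three config classes above; the keyword names
# mirror the upstream transformers Pix2Struct API and are assumptions here.
text_cfg = PixaStructTextConfig(hidden_size=768, num_layers=12)
vision_cfg = PixaStructVisionConfig(hidden_size=768, num_hidden_layers=12)
composite = PixaStructConfig(
    text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict()
)
print(composite)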
| 227 |
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
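# Sanity checks for solution(): the even terms below 10 are 2 and 8, and the
# default 4,000,000 bound gives the classic Project Euler #2 answer.
assert solution(10) == 10
assert solution(100) == 44  # 2 + 8 + 34
assert solution(4_000_000) == 4_613_732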
| 693 | 0 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod() | 352 |
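# Example runs for longest_subsequence:
print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]
print(longest_subsequence([1, 1, 1]))  # [1, 1, 1]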
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force scan of everything to the right of each element: O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same brute force, written with enumerate and slicing: still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution scanning from the right: O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
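# Agreement check between the three implementations on a small input:
sample = [3.0, 1.0, 4.0, 1.5]
assert (
    next_greatest_element_slow(sample)
    == next_greatest_element_fast(sample)
    == next_greatest_element(sample)
    == [4.0, 4.0, -1, -1]
)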
| 693 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the embedded profile JSON out of one of the page's <script> tags."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Lazily fetched view over a public Instagram profile."""

    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self : List[Any] ):
'''simple docstring'''
return F"""{self.__class__.__name__}(\'{self.username}\')"""
def __str__( self : Tuple ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Light smoke test against the live profile page (skipped on CI)."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
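# Offline check of extract_user_profile with a stub <script> tag shaped like
# the page data the parser expects (no network access required):
_stub = (
    'window._sharedData = {"config": null, "entry_data": '
    '{"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};'
)
_script = BeautifulSoup(f"<script>{_stub}</script>", "html.parser").find("script")
assert extract_user_profile(_script)["username"] == "github"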
| 347 |
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'''Node({self.data})'''


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise insert/delete/reverse on integer payloads."""
    linked_list = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    """Exercise the linked list with heterogeneous payloads."""
    test_input = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
    linked_list = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
    result = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
    result = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main() -> None:
    """Interactive demo driven from standard input."""
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
    linked_list[1] = input("Enter New Value: ").strip()
print("New list:" )
print(__snake_case )
print(f'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
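# Minimal non-interactive usage of LinkedList:
demo = LinkedList()
for value in (1, 2, 3):
    demo.insert_tail(value)
demo.insert_head(0)
assert str(demo) == "0->1->2->3"
demo.reverse()
assert str(demo) == "3->2->1->0"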
| 693 | 0 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """Divide each row by its leading coefficient, then subtract the first row
    from the rest to eliminate the first column (one Gaussian elimination step,
    applied recursively)."""
    # Divide each row by the magnitude of its first term, creating leading 1s
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations given as n rows of n+1 numbers
    (coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # If zeros are present, move one row with no zero coefficients to the front
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back substitution, starting from the fully reduced last row
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
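# Worked example: x + 2y = 3 and 4x + 5y = 6 give x = -1, y = 2:
assert solve_simultaneous([[1, 2, 3], [4, 5, 6]]) == [-1.0, 2.0]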
| 447 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Split a raw DPR training file into an evaluation set of questions and a
    gold file of tab-joined positive-context titles."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
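# Smoke-test sketch: a one-record input yields one question line and one
# tab-joined titles line (file and script names here are placeholders):
#
#   echo '[{"question": "who wrote hamlet",
#           "positive_ctxs": [{"title": "Hamlet"}]}]' > biencoder-nq-dev.json
#   python parse_dpr_relevance_data.py --evaluation_set eval.txt --gold_data_path gold.txt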
| 693 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=36, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=1000, ) -> Optional[int]:
UpperCAmelCase_: List[str] = parent
UpperCAmelCase_: Union[str, Any] = batch_size
UpperCAmelCase_: Tuple = num_channels
UpperCAmelCase_: Dict = image_size
UpperCAmelCase_: Any = patch_size
UpperCAmelCase_: str = text_seq_length
UpperCAmelCase_: str = is_training
UpperCAmelCase_: Dict = use_input_mask
UpperCAmelCase_: int = use_token_type_ids
UpperCAmelCase_: Optional[int] = use_labels
UpperCAmelCase_: Optional[int] = vocab_size
UpperCAmelCase_: List[str] = hidden_size
UpperCAmelCase_: List[str] = num_hidden_layers
UpperCAmelCase_: Any = num_attention_heads
UpperCAmelCase_: int = intermediate_size
UpperCAmelCase_: List[str] = hidden_act
UpperCAmelCase_: Tuple = hidden_dropout_prob
UpperCAmelCase_: Tuple = attention_probs_dropout_prob
UpperCAmelCase_: Union[str, Any] = max_position_embeddings
UpperCAmelCase_: str = type_vocab_size
UpperCAmelCase_: Dict = type_sequence_label_size
UpperCAmelCase_: Union[str, Any] = initializer_range
UpperCAmelCase_: Union[str, Any] = coordinate_size
UpperCAmelCase_: Optional[int] = shape_size
UpperCAmelCase_: Optional[Any] = num_labels
UpperCAmelCase_: Optional[int] = num_choices
UpperCAmelCase_: Dict = scope
UpperCAmelCase_: Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase_: int = text_seq_length
UpperCAmelCase_: Union[str, Any] = (image_size // patch_size) ** 2 + 1
UpperCAmelCase_: Optional[Any] = self.text_seq_length + self.image_seq_length
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
UpperCAmelCase_: List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_: Optional[Any] = bbox[i, j, 3]
UpperCAmelCase_: int = bbox[i, j, 1]
UpperCAmelCase_: Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_: int = bbox[i, j, 2]
UpperCAmelCase_: Dict = bbox[i, j, 0]
UpperCAmelCase_: Optional[Any] = t
UpperCAmelCase_: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_: Tuple = None
if self.use_input_mask:
UpperCAmelCase_: Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase_: List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_: Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
UpperCAmelCase_: Dict = None
UpperCAmelCase_: Tuple = None
if self.use_labels:
UpperCAmelCase_: List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase_: Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
UpperCAmelCase_: str = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCAmelCase_: Optional[int] = LayoutLMvaModel(config=_a )
model.to(_a )
model.eval()
# text + image
UpperCAmelCase_: int = model(_a, pixel_values=_a )
UpperCAmelCase_: Tuple = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a )
UpperCAmelCase_: Any = model(_a, bbox=_a, pixel_values=_a, token_type_ids=_a )
UpperCAmelCase_: Optional[Any] = model(_a, bbox=_a, pixel_values=_a )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase_: Dict = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase_: Optional[Any] = model(pixel_values=_a )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCAmelCase_: int = self.num_labels
UpperCAmelCase_: str = LayoutLMvaForSequenceClassification(_a )
model.to(_a )
model.eval()
UpperCAmelCase_: Optional[int] = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a, labels=_a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Union[str, Any] = self.num_labels
UpperCAmelCase_: int = LayoutLMvaForTokenClassification(config=_a )
model.to(_a )
model.eval()
UpperCAmelCase_: int = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a, labels=_a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: str = LayoutLMvaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
UpperCAmelCase_: List[Any] = model(
_a, bbox=_a, pixel_values=_a, attention_mask=_a, token_type_ids=_a, start_positions=_a, end_positions=_a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def __snake_case (self ) -> List[Any]:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
A = False
A = False
A = False
A = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __snake_case (self ) -> int:
UpperCAmelCase_: Tuple = LayoutLMvaModelTester(self )
UpperCAmelCase_: List[str] = ConfigTester(self, config_class=_a, hidden_size=37 )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
UpperCAmelCase_: str = copy.deepcopy(_a )
if model_class in get_values(_a ):
UpperCAmelCase_: Union[str, Any] = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(_a, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_a ):
UpperCAmelCase_: Any = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=_a )
elif model_class in get_values(_a ):
UpperCAmelCase_: Optional[Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_a )
UpperCAmelCase_: Tuple = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_a )
elif model_class in [
*get_values(_a ),
]:
UpperCAmelCase_: Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_a )
elif model_class in [
*get_values(_a ),
]:
UpperCAmelCase_: str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=_a, )
return inputs_dict
def __snake_case (self ) -> Dict:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Any:
UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_: Union[str, Any] = type
self.model_tester.create_and_check_model(*_a )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def __snake_case (self ) -> Optional[int]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_: Dict = LayoutLMvaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
@cached_property
def __snake_case (self ) -> int:
return LayoutLMvaImageProcessor(apply_ocr=_a ) if is_vision_available() else None
@slow
def __snake_case (self ) -> Any:
UpperCAmelCase_: Union[str, Any] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(_a )
UpperCAmelCase_: Optional[int] = self.default_image_processor
UpperCAmelCase_: List[Any] = prepare_img()
UpperCAmelCase_: int = image_processor(images=_a, return_tensors="""pt""" ).pixel_values.to(_a )
UpperCAmelCase_: Optional[Any] = torch.tensor([[1, 2]] )
UpperCAmelCase_: str = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
UpperCAmelCase_: List[Any] = model(
input_ids=input_ids.to(_a ), bbox=bbox.to(_a ), pixel_values=pixel_values.to(_a ), )
# verify the logits
UpperCAmelCase_: str = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape, _a )
UpperCAmelCase_: int = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(_a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], _a, atol=1E-4 ) )
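# Sketch of the same forward pass outside the test harness (checkpoint id and
# processor kwargs as used in the integration test; treat details as assumptions):
import torch
from PIL import Image
from transformers import AutoModel, AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
with torch.no_grad():
    outputs = model(**encoding)
print(outputs.last_hidden_state.shape)  # (1, text_tokens + 197, 768): 196 patches + 1 CLS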
| 556 |
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of almost-equilateral triangles (sides a-a-(a±1))
    with integral area whose perimeter does not exceed ``max_perimeter``
    (Project Euler 94)."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
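# The smallest almost-equilateral triangle with integral area is 5-5-6
# (area 12, perimeter 16), so:
assert solution(15) == 0
assert solution(16) == 16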
| 693 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub standing in for PIL.Image so this module imports without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
"""simple docstring"""
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
UpperCamelCase_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : Dict = image_classifier(_a , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ) , [
[{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """b"""}, {"""score""": 0.3_33, """label""": """c"""}],
[{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """c"""}, {"""score""": 0.3_33, """label""": """b"""}],
] , )
UpperCamelCase_ : List[str] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
] , )
@require_tf
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
UpperCamelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : Dict = image_classifier(_a , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(_a ) , [{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """b"""}, {"""score""": 0.3_33, """label""": """c"""}] , )
UpperCamelCase_ : str = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
] , )
@slow
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : Any = image_classifier(_a , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_a ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
UpperCamelCase_ : str = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : int = image_classifier(_a , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_a ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
UpperCamelCase_ : Dict = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
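# Minimal real-world call mirroring the slow tests above:
from transformers import pipeline

clf = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = clf(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
print(preds)  # highest scores for "remote" and "cat", per the assertions above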
| 208 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case , _snake_case , _snake_case : List[Any] = False, False, False
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =None
# Automatically constructed
SCREAMING_SNAKE_CASE__ ="dict"
SCREAMING_SNAKE_CASE__ =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
SCREAMING_SNAKE_CASE__ =field(default="""Audio""" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Optional[int]:
return self.pa_type
def __lowerCAmelCase ( self, _a ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(_a, _a ):
return {"bytes": None, "path": value}
elif isinstance(_a, _a ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__SCREAMING_SNAKE_CASE = BytesIO()
sf.write(_a, value["array"], value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowerCAmelCase ( self, _a, _a = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
__SCREAMING_SNAKE_CASE = xsplitext(_a )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
__SCREAMING_SNAKE_CASE = token_per_repo_id or {}
__SCREAMING_SNAKE_CASE = path.split("::" )[-1]
try:
__SCREAMING_SNAKE_CASE = string_to_dict(_a, config.HUB_DATASETS_URL )["repo_id"]
__SCREAMING_SNAKE_CASE = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__SCREAMING_SNAKE_CASE = None
with xopen(_a, "rb", use_auth_token=_a ) as f:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(_a )
__SCREAMING_SNAKE_CASE = array.T
if self.mono:
__SCREAMING_SNAKE_CASE = librosa.to_mono(_a )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__SCREAMING_SNAKE_CASE = librosa.resample(_a, orig_sr=_a, target_sr=self.sampling_rate )
__SCREAMING_SNAKE_CASE = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
__SCREAMING_SNAKE_CASE = pa.array([Audio().encode_example(_a ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("bytes" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__SCREAMING_SNAKE_CASE = storage.field("path" )
else:
__SCREAMING_SNAKE_CASE = pa.array([None] * len(_a ), type=pa.string() )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
return array_cast(_a, self.pa_type )
def __lowerCAmelCase ( self, _a ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_a ):
with xopen(_a, "rb" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
return bytes_
__SCREAMING_SNAKE_CASE = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
__SCREAMING_SNAKE_CASE = pa.array(
[os.path.basename(_a ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
__SCREAMING_SNAKE_CASE = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(_a, self.pa_type )
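# Round-trip sketch for the Audio feature (assumes the `datasets`, `soundfile`,
# and `librosa` extras are installed; API names follow the public datasets docs):
import numpy as np
from datasets import Audio, Dataset

ds = Dataset.from_dict(
    {"audio": [{"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}]}
).cast_column("audio", Audio(sampling_rate=16_000))
decoded = ds[0]["audio"]
print(decoded["sampling_rate"], decoded["array"].shape)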
| 693 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
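# The supported import path going forward, per the deprecation message above:
from diffusers import StableDiffusionControlNetPipeline  # noqa: F401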
| 694 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
if sources is int:
_a : Tuple =[sources]
if sinks is int:
_a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" ,["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" ,["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" ,[None, """v2"""] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 694 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int) -> None:
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        '''simple docstring'''
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        '''simple docstring'''
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
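    # Minimal usage sketch (names as restored above): the zero-weight edge
    # keeps the 0 -> 1 hop free, so only the 1 -> 2 edge costs anything.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    assert g.get_shortest_path(0, 2) == 1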
| 694 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 694 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self) -> None:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self) -> None:
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self) -> None:
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 694 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""")
        else:
            train_extension = self.train_file.split(""".""")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(""".""")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(F"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"""train""": data_args.train_file, """validation""": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(""".""")[-1]
                test_extension = data_args.test_file.split(""".""")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["""test"""] = data_args.test_file
            else:
                raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""")

        for key in data_files.keys():
            logger.info(F"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(""".csv"""):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("""csv""", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("""json""", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["""train"""].features["""label"""].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"""Refused""": 0, """Entailed""": 1}
    model.config.id2label = {0: """Refused""", 1: """Entailed"""}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("""#""") for _table_row in _table_text.strip("""\n""").split("""\n""")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["""statement"""]
        tables = list(map(_convert_table_text_to_pandas, examples["""table_text"""]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["""label"""] = examples["""label"""]
        return result

    with training_args.main_process_first(desc="""dataset map pre-processing"""):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="""Running tokenizer on dataset""",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""")
        train_dataset = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""")
        eval_dataset = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("""--do_predict requires a test dataset""")
        predict_dataset = raw_datasets["""test"""]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(F"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["""train_samples"""] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""", metrics)
        trainer.save_metrics("""train""", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["""eval_samples"""] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("""eval""", metrics)
        trainer.save_metrics("""eval""", metrics)

    if training_args.do_predict:
        logger.info("""*** Predict ***""")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("""label""")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="""predict""").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, """predict_results_tabfact.txt""")
        if trainer.is_world_process_zero():
            with open(output_predict_file, """w""") as writer:
                logger.info("""***** Predict Results *****""")
                writer.write("""index\tprediction\n""")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(F"{index}\t{item}\n")

    kwargs = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
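# Example invocation (illustrative only: the script file name and checkpoint
# below are assumptions; adjust them to your setup):
#   python run_tabfact_with_tapex.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact \
#     --do_train --do_eval \
#     --output_dir ./tapex-tabfact-output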
| 694 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
A__: Tuple = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}
    )
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f'''inference_time_{round(time() )}.csv''',
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f'''inference_memory_{round(time() )}.csv''',
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f'''train_time_{round(time() )}.csv''',
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f'''train_memory_{round(time() )}.csv''',
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f'''env_info_{round(time() )}.csv''',
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f'''log_{round(time() )}.csv''',
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        '''simple docstring'''
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""",
            FutureWarning,
        )

    def to_json_string(self):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        '''simple docstring'''
        if len(self.models) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""")
        return self.models

    @property
    def do_multi_processing(self) -> bool:
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""")
            return False
        else:
            return True
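# Minimal usage sketch (illustrative; benchmark scripts normally construct
# this via HfArgumentParser rather than directly):
# args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
# print(args.model_names)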
| 694 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
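# Minimal usage sketch (illustrative; requires network access to the hub):
# tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
# input_ids = tokenizer("Paris is the capital of France.")["input_ids"]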
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        '''simple docstring'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        '''simple docstring'''
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        '''simple docstring'''
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        '''simple docstring'''
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        '''simple docstring'''
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function) -> None:
        '''simple docstring'''
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        '''simple docstring'''
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        '''simple docstring'''
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        '''simple docstring'''
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        '''simple docstring'''
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        '''simple docstring'''
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        '''simple docstring'''
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        '''simple docstring'''
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        '''simple docstring'''
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        '''simple docstring'''
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        '''simple docstring'''
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        '''simple docstring'''
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        '''simple docstring'''
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        '''simple docstring'''
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        '''simple docstring'''
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        '''simple docstring'''
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
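if __name__ == "__main__":
    # Minimal usage sketch of the classes above: project a feature tensor to
    # Student's t parameters, then build the distribution and sample from it.
    student_t_output = StudentTOutput(dim=1)
    projection = student_t_output.get_parameter_projection(in_features=32)
    distr_args = projection(torch.randn(8, 32))
    distribution = student_t_output.distribution(distr_args)
    print(distribution.sample().shape)  # expected: torch.Size([8])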
| 694 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
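    # Illustrative check (graph names as restored above): E -> G -> F costs
    # 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3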
| 694 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
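    # Worked examples for the helpers above:
    assert set_bit(0b1101, 1) == 0b1111  # 13 -> 15
    assert clear_bit(0b1111, 1) == 0b1101  # 15 -> 13
    assert flip_bit(0b1101, 1) == 0b1111  # 13 -> 15
    assert is_bit_set(0b1010, 1) is True
    assert get_bit(0b1010, 0) == 0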
| 694 | 1 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("""Wrong space!""")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
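    # Both calls bracket the positive root of 10 - x**2, so each printed value
    # should be close to sqrt(10) ~= 3.162.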
| 694 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        """Expected a list of numbers as input, found """
                        F"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = F"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("""Missing an input""")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
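    # Quick worked example: |1 - 4| + |1 - 5| = 7
    assert manhattan_distance([1, 1], [4, 5]) == 7.0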
| 694 | 1 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        '''simple docstring'''
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
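if __name__ == "__main__":
    # Minimal usage sketch (class/method names as restored above): with the
    # default pass-through coefficients (a = b = [1.0, 0.0, ...]) the filter
    # returns each input sample unchanged.
    pass_through = IIRFilter(2)
    assert pass_through.process(1.0) == 1.0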
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
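A quick smoke test of the bidirectional search above, using the forward/backward adjacency maps just defined: E reaches F via G at cost 3, while E is unreachable from G in reverse, which yields the -1 sentinel.

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3 (E -> G -> F)
print(bidirectional_dij("G", "E", graph_fwd, graph_bwd))  # -1, E is unreachable from G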
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 694 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    # Sum of the digits of num! (Project Euler problem 20).
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 694 | 1 |
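Two quick checks for the digit-sum routine: 10! = 3628800, whose digits sum to 27, and the well-known result for 100! is 648.

assert solution(10) == 27
print(solution())  # 648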
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A__: str = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
A__: int = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
A__: Tuple = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE :int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE :int = CHRF.BETA , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :bool = False , ) -> str:
'''simple docstring'''
_a : Optional[Any] =len(references[0] )
if any(len(SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
_a : int =[[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE )]
_a : List[str] =CHRF(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =sb_chrf.corpus_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 694 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    # Slide a pattern-sized window over s one character at a time.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 1 |
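The scan compares up to len(pattern) characters at each of the len(s) - len(pattern) + 1 start positions, so the worst case is O(n * m); and because the window advances by one, overlapping occurrences are all reported.

print(naive_pattern_search("AAAA", "AA"))  # [0, 1, 2] -- overlapping matches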
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[Any] = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "gpt_bigcode"
__UpperCamelCase : List[str] = ["past_key_values"]
__UpperCamelCase : List[str] = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self :Any , SCREAMING_SNAKE_CASE :Any=5_0_2_5_7 , SCREAMING_SNAKE_CASE :List[str]=1_0_2_4 , SCREAMING_SNAKE_CASE :str=7_6_8 , SCREAMING_SNAKE_CASE :str=1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=1_2 , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :Optional[Any]="gelu_pytorch_tanh" , SCREAMING_SNAKE_CASE :str=0.1 , SCREAMING_SNAKE_CASE :int=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=1e-5 , SCREAMING_SNAKE_CASE :str=0.02 , SCREAMING_SNAKE_CASE :List[str]=True , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=5_0_2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=5_0_2_5_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=True , SCREAMING_SNAKE_CASE :Tuple=True , SCREAMING_SNAKE_CASE :List[Any]=True , **SCREAMING_SNAKE_CASE :Union[str, Any] , ) -> int:
'''simple docstring'''
_a : Tuple =vocab_size
_a : List[str] =n_positions
_a : str =n_embd
_a : Dict =n_layer
_a : Dict =n_head
_a : Dict =n_inner
_a : int =activation_function
_a : Union[str, Any] =resid_pdrop
_a : List[Any] =embd_pdrop
_a : Optional[Any] =attn_pdrop
_a : List[str] =layer_norm_epsilon
_a : Dict =initializer_range
_a : List[str] =scale_attn_weights
_a : Any =use_cache
_a : List[str] =attention_softmax_in_fpaa
_a : Any =scale_attention_softmax_in_fpaa
_a : Tuple =multi_query
_a : Optional[int] =bos_token_id
_a : Union[str, Any] =eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 694 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple:
'''simple docstring'''
_a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8}
_a : int =parent
_a : Optional[int] =batch_size
_a : List[str] =num_channels
_a : Optional[Any] =image_size
_a : int =min_resolution
_a : str =max_resolution
_a : str =do_resize
_a : Tuple =size
_a : Tuple =do_normalize
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
_a : Any =ImageGPTImageProcessingTester(self )
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
_a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
_a : Dict =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : List[Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any =os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict()
_a : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict()
_a : Union[str, Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : Any =load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" )
_a : Dict =Image.open(dataset[4]["""file"""] )
_a : Optional[int] =Image.open(dataset[5]["""file"""] )
_a : Optional[Any] =[imagea, imagea]
return images
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_a : int =prepare_images()
# test non-batched
_a : Dict =image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
_a : Optional[int] =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE )
# test batched
_a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
_a : Any =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
| 694 | 1 |
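The save/load assertions above reduce to a single round-trip pattern. A standalone sketch, assuming an installed transformers version that ships ImageGPTImageProcessor; the two-row clusters palette here is made up for illustration.

import tempfile
import numpy as np
from transformers import ImageGPTImageProcessor

clusters = np.asarray([[0.5, 0.5, 0.5], [-0.5, -0.5, -0.5]])  # toy 2-color palette
processor = ImageGPTImageProcessor(clusters=clusters, do_resize=True, size={"height": 18, "width": 18})
with tempfile.TemporaryDirectory() as tmpdir:
    processor.save_pretrained(tmpdir)
    reloaded = ImageGPTImageProcessor.from_pretrained(tmpdir)
    assert np.array_equal(np.asarray(reloaded.clusters), clusters)  # clusters survive the round trip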
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
A__: Any = logging.getLogger(__name__)
class A__ ( UpperCAmelCase__ ):
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
_a : Union[str, Any] =self.layer[current_layer](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , head_mask[current_layer] )
_a : int =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCAmelCase__ , )
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : Tuple =BertEncoderWithPabee(SCREAMING_SNAKE_CASE )
self.init_weights()
_a : List[Any] =0
_a : List[Any] =0
_a : Any =0
_a : Optional[Any] =0
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> str:
'''simple docstring'''
_a : Any =threshold
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] ) -> Dict:
'''simple docstring'''
_a : Dict =patience
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
_a : int =0
_a : Any =0
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
_a : Tuple =self.inference_layers_num / self.inference_instances_num
_a : Dict =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(SCREAMING_SNAKE_CASE )
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :str=None , SCREAMING_SNAKE_CASE :Tuple=None , SCREAMING_SNAKE_CASE :int=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Tuple=False , ) -> Tuple:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_a : int =input_ids.size()
elif inputs_embeds is not None:
_a : str =inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_a : Tuple =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_a : str =torch.ones(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
if token_type_ids is None:
_a : List[str] =torch.zeros(SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_a : torch.Tensor =self.get_extended_attention_mask(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_a , _a , _a : List[Any] =encoder_hidden_states.size()
_a : Any =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_a : str =torch.ones(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =self.invert_attention_mask(SCREAMING_SNAKE_CASE )
else:
_a : Optional[int] =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_a : Union[str, Any] =self.get_head_mask(SCREAMING_SNAKE_CASE , self.config.num_hidden_layers )
_a : Any =self.embeddings(
input_ids=SCREAMING_SNAKE_CASE , position_ids=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , inputs_embeds=SCREAMING_SNAKE_CASE )
_a : Tuple =embedding_output
if self.training:
_a : Tuple =[]
for i in range(self.config.num_hidden_layers ):
_a : Any =self.encoder.adaptive_forward(
SCREAMING_SNAKE_CASE , current_layer=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE )
_a : Optional[int] =self.pooler(SCREAMING_SNAKE_CASE )
_a : Optional[int] =output_layers[i](output_dropout(SCREAMING_SNAKE_CASE ) )
res.append(SCREAMING_SNAKE_CASE )
elif self.patience == 0: # Use all layers for inference
_a : List[Any] =self.encoder(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
_a : Dict =self.pooler(encoder_outputs[0] )
_a : List[Any] =[output_layers[self.config.num_hidden_layers - 1](SCREAMING_SNAKE_CASE )]
else:
_a : Any =0
_a : List[str] =None
_a : Tuple =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_a : Optional[int] =self.encoder.adaptive_forward(
SCREAMING_SNAKE_CASE , current_layer=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE )
_a : Optional[Any] =self.pooler(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =output_layers[i](SCREAMING_SNAKE_CASE )
if regression:
_a : Union[str, Any] =logits.detach()
if patient_result is not None:
_a : List[str] =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_a : Optional[Any] =0
else:
_a : List[Any] =logits.detach().argmax(dim=1 )
if patient_result is not None:
_a : Union[str, Any] =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(SCREAMING_SNAKE_CASE ) ):
patient_counter += 1
else:
_a : Optional[int] =0
_a : Optional[Any] =logits
if patient_counter == self.patience:
break
_a : Optional[Any] =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCAmelCase__ , )
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> List[Any]:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : str =config.num_labels
_a : Any =BertModelWithPabee(SCREAMING_SNAKE_CASE )
_a : List[Any] =nn.Dropout(config.hidden_dropout_prob )
_a : Any =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :str=None , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :str=None , SCREAMING_SNAKE_CASE :Tuple=None , ) -> List[str]:
'''simple docstring'''
_a : Any =self.bert(
input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , position_ids=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , inputs_embeds=SCREAMING_SNAKE_CASE , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_a : Optional[Any] =(logits[-1],)
if labels is not None:
_a : Any =None
_a : Union[str, Any] =0
for ix, logits_item in enumerate(SCREAMING_SNAKE_CASE ):
if self.num_labels == 1:
# We are doing regression
_a : Union[str, Any] =MSELoss()
_a : Any =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_a : int =CrossEntropyLoss()
_a : str =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_a : Optional[int] =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_a : str =(total_loss / total_weights,) + outputs
return outputs
| 694 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
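The table costs O(len(arr) * required_sum) time and space; entry subset[i][j] records whether the first i values can reach sum j. For example:

print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True, 4 + 5 = 9
print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False, no subset reaches 30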
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A__: int = logging.get_logger(__name__)
# General docstring
A__: Dict = '''RegNetConfig'''
# Base docstring
A__: Union[str, Any] = '''facebook/regnet-y-040'''
A__: Any = [1, 1088, 7, 7]
# Image classification docstring
A__: List[Any] = '''facebook/regnet-y-040'''
A__: int = '''tabby, tabby cat'''
A__: Optional[Any] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( tf.keras.layers.Layer ):
def __init__( self :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int = 3 , SCREAMING_SNAKE_CASE :int = 1 , SCREAMING_SNAKE_CASE :int = 1 , SCREAMING_SNAKE_CASE :Optional[str] = "relu" , **SCREAMING_SNAKE_CASE :Union[str, Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_a : Union[str, Any] =tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_a : Optional[Any] =tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE , kernel_size=SCREAMING_SNAKE_CASE , strides=SCREAMING_SNAKE_CASE , padding="""VALID""" , groups=SCREAMING_SNAKE_CASE , use_bias=SCREAMING_SNAKE_CASE , name="""convolution""" , )
_a : Tuple =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
_a : Union[str, Any] =ACTaFN[activation] if activation is not None else tf.identity
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Tuple ) -> List[str]:
'''simple docstring'''
_a : Dict =self.convolution(self.padding(SCREAMING_SNAKE_CASE ) )
_a : Optional[int] =self.normalization(SCREAMING_SNAKE_CASE )
_a : int =self.activation(SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( tf.keras.layers.Layer ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :RegNetConfig , **SCREAMING_SNAKE_CASE :Dict ) -> Dict:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Any =config.num_channels
_a : List[Any] =TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :str ) -> str:
'''simple docstring'''
_a : List[str] =shape_list(SCREAMING_SNAKE_CASE )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_a : Union[str, Any] =tf.transpose(SCREAMING_SNAKE_CASE , perm=(0, 2, 3, 1) )
_a : Optional[Any] =self.embedder(SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( tf.keras.layers.Layer ):
def __init__( self :Dict , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int = 2 , **SCREAMING_SNAKE_CASE :Optional[int] ) -> Dict:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Tuple =tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE , kernel_size=1 , strides=SCREAMING_SNAKE_CASE , use_bias=SCREAMING_SNAKE_CASE , name="""convolution""" )
_a : int =tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :tf.Tensor , SCREAMING_SNAKE_CASE :bool = False ) -> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE ) , training=SCREAMING_SNAKE_CASE )
class A__ ( tf.keras.layers.Layer ):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> str:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : int =tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE , name="""pooler""" )
_a : Tuple =[
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> List[Any]:
'''simple docstring'''
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_a : Union[str, Any] =self.pooler(SCREAMING_SNAKE_CASE )
for layer_module in self.attention:
_a : int =layer_module(SCREAMING_SNAKE_CASE )
_a : List[Any] =hidden_state * pooled
return hidden_state
class A__ ( tf.keras.layers.Layer ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :RegNetConfig , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int = 1 , **SCREAMING_SNAKE_CASE :int ) -> str:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =in_channels != out_channels or stride != 1
_a : str =max(1 , out_channels // config.groups_width )
_a : Dict =(
TFRegNetShortCut(SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_a : Optional[Any] =[
TFRegNetConvLayer(SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , groups=SCREAMING_SNAKE_CASE , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE , kernel_size=1 , activation=SCREAMING_SNAKE_CASE , name="""layer.2""" ),
]
_a : Any =ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =hidden_state
for layer_module in self.layers:
_a : List[str] =layer_module(SCREAMING_SNAKE_CASE )
_a : Dict =self.shortcut(SCREAMING_SNAKE_CASE )
hidden_state += residual
_a : List[Any] =self.activation(SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( tf.keras.layers.Layer ):
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :RegNetConfig , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int = 1 , **SCREAMING_SNAKE_CASE :Any ) -> Tuple:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =in_channels != out_channels or stride != 1
_a : List[Any] =max(1 , out_channels // config.groups_width )
_a : List[str] =(
TFRegNetShortCut(SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
_a : Union[str, Any] =[
TFRegNetConvLayer(SCREAMING_SNAKE_CASE , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , groups=SCREAMING_SNAKE_CASE , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE , kernel_size=1 , activation=SCREAMING_SNAKE_CASE , name="""layer.3""" ),
]
_a : List[str] =ACTaFN[config.hidden_act]
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
_a : Union[str, Any] =hidden_state
for layer_module in self.layers:
_a : Any =layer_module(SCREAMING_SNAKE_CASE )
_a : Dict =self.shortcut(SCREAMING_SNAKE_CASE )
hidden_state += residual
_a : List[str] =self.activation(SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( tf.keras.layers.Layer ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :RegNetConfig , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :int = 2 , SCREAMING_SNAKE_CASE :int = 2 , **SCREAMING_SNAKE_CASE :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : int =TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
_a : Union[str, Any] =[
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , name="""layers.0""" ),
*[layer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for layer_module in self.layers:
_a : List[Any] =layer_module(SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( tf.keras.layers.Layer ):
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :RegNetConfig , **SCREAMING_SNAKE_CASE :Any ) -> Tuple:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
_a : Any =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , depth=SCREAMING_SNAKE_CASE , name=f"stages.{i+1}" ) )
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :tf.Tensor , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :bool = True ) -> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_a : Tuple =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : Union[str, Any] =hidden_states + (hidden_state,)
_a : Union[str, Any] =stage_module(SCREAMING_SNAKE_CASE )
if output_hidden_states:
_a : Union[str, Any] =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE , hidden_states=SCREAMING_SNAKE_CASE )
@keras_serializable
class A__ ( tf.keras.layers.Layer ):
__UpperCamelCase : Optional[int] = RegNetConfig
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , **SCREAMING_SNAKE_CASE :List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : int =config
_a : Any =TFRegNetEmbeddings(SCREAMING_SNAKE_CASE , name="""embedder""" )
_a : Dict =TFRegNetEncoder(SCREAMING_SNAKE_CASE , name="""encoder""" )
_a : int =tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE , name="""pooler""" )
@unpack_inputs
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :tf.Tensor , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_a : int =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[Any] =self.embedder(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
_a : List[Any] =self.encoder(
SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
_a : Any =encoder_outputs[0]
_a : Tuple =self.pooler(SCREAMING_SNAKE_CASE )
# Change to NCHW output format have uniformity in the modules
_a : int =tf.transpose(SCREAMING_SNAKE_CASE , perm=(0, 3, 1, 2) )
_a : List[Any] =tf.transpose(SCREAMING_SNAKE_CASE , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_a : List[Any] =tuple([tf.transpose(SCREAMING_SNAKE_CASE , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE , pooler_output=SCREAMING_SNAKE_CASE , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Optional[Any] = RegNetConfig
__UpperCamelCase : Optional[int] = "regnet"
__UpperCamelCase : List[Any] = "pixel_values"
@property
def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
A__: int = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
A__: str = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class A__ ( UpperCAmelCase__ ):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :RegNetConfig , *SCREAMING_SNAKE_CASE :str , **SCREAMING_SNAKE_CASE :Optional[int] ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : List[Any] =TFRegNetMainLayer(SCREAMING_SNAKE_CASE , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :tf.Tensor , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :Optional[bool] = None , SCREAMING_SNAKE_CASE :int=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_a : Any =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Dict =return_dict if return_dict is not None else self.config.use_return_dict
_a : Dict =self.regnet(
pixel_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :RegNetConfig , *SCREAMING_SNAKE_CASE :int , **SCREAMING_SNAKE_CASE :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : List[Any] =config.num_labels
_a : List[str] =TFRegNetMainLayer(SCREAMING_SNAKE_CASE , name="""regnet""" )
# classification head
_a : Union[str, Any] =[
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :tf.Tensor = None , SCREAMING_SNAKE_CASE :tf.Tensor = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Tuple=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_a : Optional[int] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : int =return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[int] =self.regnet(
SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
_a : Tuple =outputs.pooler_output if return_dict else outputs[1]
_a : Optional[int] =self.classifier[0](SCREAMING_SNAKE_CASE )
_a : List[str] =self.classifier[1](SCREAMING_SNAKE_CASE )
_a : Tuple =None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE )
if not return_dict:
_a : Any =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
| 694 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"{solution() = }")
| 694 | 1 |
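Sanity checks: the even Fibonacci numbers up to 10 are 2 and 8, and for the default 4,000,000 bound the sum is the familiar Project Euler 2 answer.

assert solution(10) == 10  # 2 + 8
print(solution())  # 4613732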
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 ,5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 ,3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 ,3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 ,6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 ,3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 ,5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 ,3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 ,3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 ,6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 ,3 )}" )
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 694 | 1 |
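Spot checks for the two snippets above, assuming both are loaded in one session: gcd(252, 105) is 21 by either implementation, and F(12) = 144 is the first Fibonacci number with three digits; with the default n = 1000 the index matches the known Project Euler 25 answer.

assert euclidean_gcd(252, 105) == 21 == euclidean_gcd_recursive(252, 105)
assert solution(3) == 12  # F(12) = 144
print(solution())  # 4782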
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 694 | 1 |
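The script is normally driven from the command line; an equivalent direct call looks like the sketch below, with placeholder paths standing in for real checkpoint, config, and output locations.

convert_rembert_tf_checkpoint_to_pytorch(
    "/path/to/tf_checkpoint",        # --tf_checkpoint_path
    "/path/to/rembert_config.json",  # --rembert_config_file
    "/path/to/pytorch_model.bin",    # --pytorch_dump_path
)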
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ibert'''] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
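# A worked example of the special-token layouts produced above (inferred from
# the methods, not executed here): for a single sequence X,
# build_inputs_with_special_tokens yields [cls] + X + [sep], i.e. "<s> X </s>",
# and for a pair (A, B) it yields [cls] + A + [sep] + [sep] + B + [sep], i.e.
# "<s> A </s></s> B </s>". create_token_type_ids_from_sequences returns all
# zeros for both layouts, since this tokenizer does not use token type ids.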
| 694 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
A__: List[str] = (720, 1280) # Height, Width
A__: List[str] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
A__: int = 1 / 100
A__: Union[str, Any] = ''''''
A__: Any = ''''''
A__: Dict = ''''''
A__: str = 250
def SCREAMING_SNAKE_CASE_ ( ) -> None:
_a , _a : List[str] =get_dataset(_UpperCAmelCase ,_UpperCAmelCase )
for index in range(_UpperCAmelCase ):
_a : Tuple =random.sample(range(len(_UpperCAmelCase ) ) ,4 )
_a , _a , _a : Union[str, Any] =update_image_and_anno(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,filter_scale=_UpperCAmelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_a : List[Any] =random_chars(32 )
_a : Optional[Any] =path.split(os.sep )[-1].rsplit(""".""" ,1 )[0]
_a : str =F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg" ,_UpperCAmelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
_a : List[Any] =[]
for anno in new_annos:
_a : List[Any] =anno[3] - anno[1]
_a : str =anno[4] - anno[2]
_a : Union[str, Any] =anno[1] + width / 2
_a : int =anno[2] + height / 2
_a : Optional[int] =F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(_UpperCAmelCase )
with open(F"{file_root}.txt" ,"""w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> tuple[list, list]:
_a : Union[str, Any] =[]
_a : Any =[]
for label_file in glob.glob(os.path.join(_UpperCAmelCase ,"""*.txt""" ) ):
_a : List[Any] =label_file.split(os.sep )[-1].rsplit(""".""" ,1 )[0]
with open(_UpperCAmelCase ) as in_file:
_a : str =in_file.readlines()
_a : Tuple =os.path.join(_UpperCAmelCase ,F"{label_name}.jpg" )
_a : Optional[int] =[]
for obj_list in obj_lists:
_a : Optional[int] =obj_list.rstrip("""\n""" ).split(""" """ )
_a : Tuple =float(obj[1] ) - float(obj[3] ) / 2
_a : Union[str, Any] =float(obj[2] ) - float(obj[4] ) / 2
_a : str =float(obj[1] ) + float(obj[3] ) / 2
_a : Union[str, Any] =float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_UpperCAmelCase )
labels.append(_UpperCAmelCase )
return img_paths, labels
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ,_UpperCAmelCase : list[int] ,_UpperCAmelCase : tuple[int, int] ,_UpperCAmelCase : tuple[float, float] ,_UpperCAmelCase : float = 0.0 ,) -> tuple[list, list, str]:
_a : Optional[Any] =np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_a : Optional[Any] =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_a : Optional[Any] =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_a : int =int(scale_x * output_size[1] )
_a : Dict =int(scale_y * output_size[0] )
_a : Tuple =[]
_a : Dict =[]
for i, index in enumerate(_UpperCAmelCase ):
_a : List[str] =all_img_list[index]
path_list.append(_UpperCAmelCase )
_a : List[Any] =all_annos[index]
_a : str =cva.imread(_UpperCAmelCase )
if i == 0: # top-left
_a : List[Any] =cva.resize(_UpperCAmelCase ,(divid_point_x, divid_point_y) )
_a : Tuple =img
for bbox in img_annos:
_a : List[Any] =bbox[1] * scale_x
_a : Dict =bbox[2] * scale_y
_a : List[Any] =bbox[3] * scale_x
_a : Tuple =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_a : List[Any] =cva.resize(_UpperCAmelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_a : Any =img
for bbox in img_annos:
_a : Union[str, Any] =scale_x + bbox[1] * (1 - scale_x)
_a : Optional[Any] =bbox[2] * scale_y
_a : Dict =scale_x + bbox[3] * (1 - scale_x)
_a : str =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_a : List[Any] =cva.resize(_UpperCAmelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_a : Optional[int] =img
for bbox in img_annos:
_a : Tuple =bbox[1] * scale_x
_a : Union[str, Any] =scale_y + bbox[2] * (1 - scale_y)
_a : List[str] =bbox[3] * scale_x
_a : str =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_a : Any =cva.resize(
_UpperCAmelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_a : Any =img
for bbox in img_annos:
_a : str =scale_x + bbox[1] * (1 - scale_x)
_a : Tuple =scale_y + bbox[2] * (1 - scale_y)
_a : List[str] =scale_x + bbox[3] * (1 - scale_x)
_a : List[str] =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_a : Optional[int] =[
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_a : List[str] =ascii_lowercase + digits
return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
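    # A minimal standalone sketch of the top-right-quadrant bbox remap performed
    # in update_image_and_anno above (the helper below is illustrative and not
    # part of this file): x coordinates are squeezed into [scale_x, 1] and
    # y coordinates into [0, scale_y].
    def _remap_top_right(xmin, ymin, xmax, ymax, scale_x, scale_y):
        return (
            scale_x + xmin * (1 - scale_x),
            ymin * scale_y,
            scale_x + xmax * (1 - scale_x),
            ymax * scale_y,
        )

    assert _remap_top_right(0.25, 0.5, 0.75, 1.0, 0.5, 0.5) == (0.625, 0.25, 0.875, 0.5)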
| 694 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class A__ :
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :int | None = None ) -> Optional[int]:
'''simple docstring'''
_a : int =value
_a : Node | None =None # Added in order to delete a node easier
_a : Node | None =None
_a : Node | None =None
def __repr__( self :Dict ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"{self.value}": (self.left, self.right)} , indent=1 )
class A__ :
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Node | None = None ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =root
def __str__( self :Optional[Any] ) -> str:
'''simple docstring'''
return str(self.root )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Node , SCREAMING_SNAKE_CASE :Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
_a : Optional[int] =node.parent
if node.parent is not None: # reset its parent
if self.is_right(SCREAMING_SNAKE_CASE ): # If it is the right children
_a : List[Any] =new_children
else:
_a : List[Any] =new_children
else:
_a : Tuple =new_children
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCAmelCase ( self :Any ) -> bool:
'''simple docstring'''
return self.root is None
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> None:
'''simple docstring'''
_a : Optional[Any] =Node(SCREAMING_SNAKE_CASE ) # create a new Node
if self.empty(): # if Tree is empty
_a : Dict =new_node # set its root
else: # Tree is not empty
_a : Optional[int] =self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_a : str =new_node # We insert the new node in a leaf
break
else:
_a : List[str] =parent_node.left
else:
if parent_node.right is None:
_a : Tuple =new_node
break
else:
_a : str =parent_node.right
_a : Optional[Any] =parent_node
def __UpperCAmelCase ( self :Optional[Any] , *SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
for value in values:
self.__insert(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Any ) -> Node | None:
'''simple docstring'''
if self.empty():
            raise IndexError("""Warning: Tree is empty! Please insert values before searching.""" )
else:
_a : Union[str, Any] =self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_a : List[Any] =node.left if value < node.value else node.right
return node
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
_a : Optional[Any] =self.root
if not self.empty():
while node.right is not None:
_a : Optional[int] =node.right
return node
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
_a : Optional[int] =self.root
if self.root is None:
return None
if not self.empty():
_a : Optional[Any] =self.root
while node.left is not None:
_a : str =node.left
return node
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : Optional[int] =self.search(SCREAMING_SNAKE_CASE ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif node.left is None: # Has only right children
self.__reassign_nodes(SCREAMING_SNAKE_CASE , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(SCREAMING_SNAKE_CASE , node.left )
else:
_a : Any =self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_a : Tuple =(
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any]=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :list , SCREAMING_SNAKE_CASE :Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(SCREAMING_SNAKE_CASE , node.left )
arr.append(node.value )
self.inorder(SCREAMING_SNAKE_CASE , node.right )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Node ) -> int:
'''simple docstring'''
_a : list[int] =[]
self.inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # append all values to list using inorder traversal
return arr[k - 1]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Node | None ) -> list[Node]:
_a : List[str] =[]
if curr_node is not None:
_a : List[str] =postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def SCREAMING_SNAKE_CASE_ ( ) -> None:
_a : Dict =(8, 3, 6, 1, 10, 14, 13, 4, 7)
_a : Optional[Any] =BinarySearchTree()
for i in testlist:
t.insert(_UpperCAmelCase )
# Prints all the elements of the list in order traversal
print(_UpperCAmelCase )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn't exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn't exist""" )
if not t.empty():
print("""Max Value: """ ,t.get_max().value ) # type: ignore
print("""Min Value: """ ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_UpperCAmelCase )
print(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 694 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list[list]:
_a : Dict =current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
_a : Any =row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
_a : Any =column
continue
_a : Union[str, Any] =column / magnitude
# Subtract to cancel term
_a : Optional[Any] =current_set[0]
_a : List[Any] =[first_row]
_a : Tuple =current_set[1::]
for row in current_set:
_a : Any =[]
        # If the first term is 0, it is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a : List[str] =final_set[0]
_a : Tuple =[]
_a : Tuple =[]
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a : str =simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,_UpperCAmelCase )
_a : List[Any] =resultant
return final_set
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list[list] ) -> list:
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
_a : str =len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase ,(int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
_a : str =equations.copy()
if any(0 in row for row in data_set ):
_a : Optional[int] =data_set.copy()
_a : str =[]
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
_a : List[Any] =data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 ,_UpperCAmelCase )
_a : Dict =data_set.copy()
_a : Any =simplify(_UpperCAmelCase )
_a : Any =simplified[::-1]
_a : list =[]
for row in simplified:
_a : Optional[Any] =row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a : Any =row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
_a : List[str] =temp_row[1::]
_a : int =temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
_a : Tuple =[]
for item in solutions:
final.append(float(round(_UpperCAmelCase ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
A__: int = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
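    # A quick cross-check against numpy (a minimal sketch; it assumes numpy is
    # installed and that the solver is callable under the name used in the demo
    # above; `system` is a fresh example, not a name from this file):
    import numpy as np

    system = [[2.0, 1.0, 5.0], [1.0, 3.0, 10.0]]  # 2x + y = 5, x + 3y = 10
    lhs = np.array([row[:-1] for row in system])
    rhs = np.array([row[-1] for row in system])
    print(np.allclose(np.linalg.solve(lhs, rhs), solve_simultaneous(system)))  # expected: True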
| 694 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A__ ( UpperCAmelCase__ ):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Any:
'''simple docstring'''
_a : Tuple =dataset
_a : Union[str, Any] =process
_a : Dict =params
def __len__( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self :Any , SCREAMING_SNAKE_CASE :str ) -> str:
'''simple docstring'''
_a : Dict =self.dataset[i]
_a : str =self.process(SCREAMING_SNAKE_CASE , **self.params )
return processed
class A__ ( UpperCAmelCase__ ):
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[Any]=None ) -> str:
'''simple docstring'''
_a : Optional[int] =loader
_a : Any =infer
_a : str =params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_a : Dict =None
_a : Any =loader_batch_size
# Internal bookkeeping
_a : List[Any] =None
_a : Optional[Any] =None
def __len__( self :Any ) -> int:
'''simple docstring'''
return len(self.loader )
def __iter__( self :str ) -> Dict:
'''simple docstring'''
_a : Optional[Any] =iter(self.loader )
return self
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_a : Union[str, Any] =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_a : Any ={}
for k, element in self._loader_batch_data.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# Convert ModelOutput to tuple first
_a : int =element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_a : List[Any] =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_a : Optional[Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_a : Optional[int] =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_a : List[str] =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_a : Tuple =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_a : Optional[int] =element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_a : List[Any] =np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_a : Any =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_a : str =self._loader_batch_data.__class__(SCREAMING_SNAKE_CASE )
self._loader_batch_index += 1
return result
def __UpperCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_a : int =next(self.iterator )
_a : Any =self.infer(SCREAMING_SNAKE_CASE , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
_a : Union[str, Any] =processed
else:
_a : List[str] =list(processed.keys() )[0]
_a : List[Any] =processed[key]
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
else:
_a : Union[str, Any] =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_a : List[str] =observed_batch_size
# Setting internal index to unwrap the batch
_a : Tuple =processed
_a : int =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Dict=None ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __iter__( self :List[str] ) -> int:
'''simple docstring'''
_a : Dict =iter(self.loader )
_a : Tuple =None
return self
def __UpperCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
if self.subiterator is None:
_a : int =self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_a : Tuple =next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it is that we're basically flattening lists
            # of lists into a single list, but with generators.
_a : Any =self.infer(next(self.iterator ) , **self.params )
_a : List[Any] =next(self.subiterator )
return processed
class A__ ( UpperCAmelCase__ ):
def __iter__( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_a : List[str] =iter(self.loader )
return self
def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here, in the original `process`
        # boundaries, so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` item and then just passes the accumulated items on to the caller.
_a : int =False
_a : Optional[int] =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_a : List[Any] =self.loader_batch_item()
_a : str =item.pop("""is_last""" )
accumulator.append(SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_a : str =self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
_a : str =processed
else:
_a : Optional[Any] =list(processed.keys() )[0]
_a : Optional[Any] =processed[key]
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
else:
_a : Optional[int] =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_a : Optional[int] =observed_batch_size
_a : Optional[Any] =processed
_a : List[Any] =0
while self._loader_batch_index < self.loader_batch_size:
_a : Union[str, Any] =self.loader_batch_item()
_a : str =item.pop("""is_last""" )
accumulator.append(SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_a : List[str] =processed
_a : List[Any] =item.pop("""is_last""" )
accumulator.append(SCREAMING_SNAKE_CASE )
return accumulator
class A__ ( UpperCAmelCase__ ):
def __init__( self :Any , SCREAMING_SNAKE_CASE :Dataset , SCREAMING_SNAKE_CASE :str ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =dataset
_a : List[str] =key
def __len__( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Any:
'''simple docstring'''
return self.dataset[i][self.key]
class A__ ( UpperCAmelCase__ ):
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dataset , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :str ) -> Optional[Any]:
'''simple docstring'''
_a : str =dataset
_a : Optional[int] =keya
_a : Union[str, Any] =keya
def __len__( self :str ) -> str:
'''simple docstring'''
return len(self.dataset )
def __getitem__( self :int , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
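# A minimal standalone sketch of the unbatching idea the iterators above rely
# on: turn one dict of batched tensors into per-item dicts whose tensors keep a
# leading batch dimension of 1 (illustrative only, not part of this module;
# `torch` is already imported at the top of this file).
def _unroll_batch(batch: dict) -> list:
    size = next(iter(batch.values())).shape[0]
    return [
        {key: tensor[i].unsqueeze(0) for key, tensor in batch.items()}
        for i in range(size)
    ]


_example_items = _unroll_batch({"logits": torch.zeros(4, 2), "mask": torch.ones(4, 7)})
assert len(_example_items) == 4 and _example_items[0]["logits"].shape == (1, 2)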
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Optional[int] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = "markuplm"
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :List[Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :int=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=5_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=2 , SCREAMING_SNAKE_CASE :Optional[int]=0.02 , SCREAMING_SNAKE_CASE :Any=1e-12 , SCREAMING_SNAKE_CASE :Any=0 , SCREAMING_SNAKE_CASE :List[Any]=0 , SCREAMING_SNAKE_CASE :Tuple=2 , SCREAMING_SNAKE_CASE :Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE :Tuple=2_1_6 , SCREAMING_SNAKE_CASE :Dict=1_0_0_1 , SCREAMING_SNAKE_CASE :List[str]=3_2 , SCREAMING_SNAKE_CASE :List[str]=5_0 , SCREAMING_SNAKE_CASE :Dict="absolute" , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : Any =vocab_size
_a : List[str] =hidden_size
_a : List[str] =num_hidden_layers
_a : Tuple =num_attention_heads
_a : Union[str, Any] =hidden_act
_a : Tuple =intermediate_size
_a : Optional[Any] =hidden_dropout_prob
_a : int =attention_probs_dropout_prob
_a : Any =max_position_embeddings
_a : List[Any] =type_vocab_size
_a : List[Any] =initializer_range
_a : List[Any] =layer_norm_eps
_a : Optional[int] =position_embedding_type
_a : List[Any] =use_cache
_a : List[str] =classifier_dropout
# additional properties
_a : int =max_depth
_a : Union[str, Any] =max_xpath_tag_unit_embeddings
_a : str =max_xpath_subs_unit_embeddings
_a : int =tag_pad_id
_a : List[Any] =subs_pad_id
_a : str =xpath_unit_hidden_size
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : NDArray[floataa] ,_UpperCAmelCase : NDArray[floataa] ,_UpperCAmelCase : list[int] ,_UpperCAmelCase : int ,) -> list[float]:
_a , _a : Dict =coefficient_matrix.shape
_a , _a : List[Any] =constant_matrix.shape
if rowsa != colsa:
_a : Optional[Any] =F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(_UpperCAmelCase )
if colsa != 1:
_a : Optional[Any] =F"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(_UpperCAmelCase )
if rowsa != rowsa:
_a : Tuple =(
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(_UpperCAmelCase )
if len(_UpperCAmelCase ) != rowsa:
_a : Union[str, Any] =(
"""Number of initial values must be equal to number of rows in coefficient """
F"matrix but received {len(_UpperCAmelCase )} and {rowsa}"
)
raise ValueError(_UpperCAmelCase )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
_a : NDArray[floataa] =np.concatenate(
(coefficient_matrix, constant_matrix) ,axis=1 )
_a , _a : List[str] =table.shape
strictly_diagonally_dominant(_UpperCAmelCase )
# Iterates the whole matrix for given number of times
for _ in range(_UpperCAmelCase ):
_a : Dict =[]
for row in range(_UpperCAmelCase ):
_a : Union[str, Any] =0
for col in range(_UpperCAmelCase ):
if col == row:
_a : int =table[row][col]
elif col == cols - 1:
_a : Union[str, Any] =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_a : List[str] =(temp + val) / denom
new_val.append(_UpperCAmelCase )
_a : Optional[int] =new_val
return [float(_UpperCAmelCase ) for i in new_val]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : NDArray[floataa] ) -> bool:
_a , _a : Any =table.shape
_a : int =True
for i in range(0 ,_UpperCAmelCase ):
_a : Tuple =0
for j in range(0 ,cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
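    # A minimal standalone Jacobi sketch on a strictly diagonally dominant 2x2
    # system (illustrative only and independent of the function names above):
    import numpy as np

    A = np.array([[4.0, 1.0], [2.0, 5.0]])
    b = np.array([1.0, 2.0])
    x = np.zeros_like(b)
    for _ in range(50):
        # x_i <- (b_i - sum_{j != i} A_ij * x_j) / A_ii
        x = (b - (A - np.diag(np.diag(A))) @ x) / np.diag(A)
    print(np.allclose(x, np.linalg.solve(A, b)))  # expected: True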
| 694 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A__: Union[str, Any] = logging.get_logger('''transformers.models.speecht5''')
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ) -> Dict:
hf_model.apply_weight_norm()
_a : Any =checkpoint["""input_conv.weight_g"""]
_a : Union[str, Any] =checkpoint["""input_conv.weight_v"""]
_a : Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
_a : Optional[int] =checkpoint[F"upsamples.{i}.1.weight_g"]
_a : Optional[Any] =checkpoint[F"upsamples.{i}.1.weight_v"]
_a : List[Any] =checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a : Optional[int] =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
_a : Tuple =checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
_a : Union[str, Any] =checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
_a : Dict =checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
_a : Tuple =checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
_a : Dict =checkpoint["""output_conv.1.weight_g"""]
_a : str =checkpoint["""output_conv.1.weight_v"""]
_a : Union[str, Any] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : int=None ,_UpperCAmelCase : Tuple=None ,) -> List[Any]:
if config_path is not None:
_a : str =SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase )
else:
_a : str =SpeechTaHifiGanConfig()
_a : Tuple =SpeechTaHifiGan(_UpperCAmelCase )
_a : int =torch.load(_UpperCAmelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict =np.load(_UpperCAmelCase )
_a : Union[str, Any] =stats[0].reshape(-1 )
_a : Any =stats[1].reshape(-1 )
_a : Tuple =torch.from_numpy(_UpperCAmelCase ).float()
_a : List[str] =torch.from_numpy(_UpperCAmelCase ).float()
model.save_pretrained(_UpperCAmelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__: Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
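    # Example invocation (a sketch: the script name and all paths below are
    # hypothetical placeholders, while the flags match the argparse definition
    # above):
    #   python convert_hifigan_checkpoint.py \
    #       --checkpoint_path ./generator.ckpt \
    #       --stats_path ./stats.npy \
    #       --config_path ./config.json \
    #       --pytorch_dump_folder_path ./speecht5_hifigan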
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A__: Optional[int] = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: List[Any] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
        if isinstance(sources , int ):
            _a : Tuple =[sources]
        if isinstance(sinks , int ):
            _a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set a maximum flow algorithm first.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
            raise Exception("""You should execute the algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
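    # For this chain-shaped network (0 -> 1 -> 2 -> 3) the only augmenting path
    # runs through capacities 7, 6 and 8, so the expected maximum flow is
    # min(7, 6, 8) = 6.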
| 694 | 1 |
'''simple docstring'''
from random import randint, random
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : int = 5 ,) -> list:
_a : Tuple =[[-1] * number_of_cells] # Create a highway without any car
_a : List[Any] =0
_a : Optional[int] =max(_UpperCAmelCase ,0 )
while i < number_of_cells:
_a : Dict =(
randint(0 ,_UpperCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 ,max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : int ) -> int:
_a : Optional[Any] =0
_a : List[Any] =highway_now[car_index + 1 :]
for cell in range(len(_UpperCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # If we get here, the car is near the end of the highway, so wrap around
return distance + get_distance(_UpperCAmelCase ,-1 )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : float ,_UpperCAmelCase : int ) -> list:
_a : str =len(_UpperCAmelCase )
    # Before calculations, the highway is empty
_a : Optional[Any] =[-1] * number_of_cells
for car_index in range(_UpperCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_a : str =min(highway_now[car_index] + 1 ,_UpperCAmelCase )
            # Number of empty cells before the next car
_a : Union[str, Any] =get_distance(_UpperCAmelCase ,_UpperCAmelCase ) - 1
# We can't have the car causing an accident
_a : int =min(next_highway[car_index] ,_UpperCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_a : List[str] =max(next_highway[car_index] - 1 ,0 )
return next_highway
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : list ,_UpperCAmelCase : int ,_UpperCAmelCase : float ,_UpperCAmelCase : int ) -> list:
_a : List[str] =len(highway[0] )
for i in range(_UpperCAmelCase ):
_a : Union[str, Any] =update(highway[i] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : List[str] =[-1] * number_of_cells
for car_index in range(_UpperCAmelCase ):
_a : Optional[Any] =next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_a : Tuple =(car_index + speed) % number_of_cells
# Commit the change of position
_a : Dict =speed
highway.append(_UpperCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
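    # A minimal standalone sketch of one deterministic Nagel-Schreckenberg step
    # on a tiny circular road (-1 marks an empty cell, other values are car
    # speeds; the random slowdown is omitted for determinism, and the names
    # below are illustrative, independent of the names used above):
    road = [0, -1, -1, 1, -1, -1, -1, -1]
    max_speed = 2
    next_road = [-1] * len(road)
    for idx, speed in enumerate(road):
        if speed == -1:
            continue
        gap, j = 0, (idx + 1) % len(road)
        while road[j] == -1:  # count empty cells up to the next car
            gap, j = gap + 1, (j + 1) % len(road)
        new_speed = min(speed + 1, max_speed, gap)  # accelerate, then brake
        next_road[(idx + new_speed) % len(road)] = new_speed
    print(next_road)  # expected: [-1, 1, -1, -1, -1, 2, -1, -1]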
| 694 |
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
_a : Dict =tempfile.mkdtemp()
# fmt: off
_a : Optional[int] =["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_a : Tuple =dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
_a : str =["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_a : Dict ={"""unk_token""": """<unk>"""}
_a : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE ) )
_a : Optional[int] ={
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_a : Dict =os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , **SCREAMING_SNAKE_CASE :List[Any] ) -> Any:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> str:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , **SCREAMING_SNAKE_CASE :List[str] ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : List[str] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
_a : Dict =self.get_tokenizer()
_a : List[str] =self.get_rust_tokenizer()
_a : List[Any] =self.get_image_processor()
_a : Dict =CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
_a : Tuple =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
_a : List[str] =CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
_a : int =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : List[Any] =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_a : Dict =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
_a : List[Any] =CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(125.50, 0.05) = }")
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
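
# Illustrative construction (added sketch; the class is deprecated, as its own
# warning notes, and the module's relative import means this only runs inside
# the transformers package context):
if __name__ == "__main__":
    benchmark_args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[1], sequence_lengths=[8])
    print(benchmark_args.model_names)  # -> ['bert-base-cased']
    print(benchmark_args.to_json_string())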
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in `value_array`, find the nearest vector in `dataset`."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
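    # Small illustrative run (added; the values are arbitrary examples):
    example_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    example_queries = np.array([[0.1, 0.1]])
    print(similarity_search(example_dataset, example_queries))  # nearest vector and its distance
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # parallel vectors -> 1.0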
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Maps inputs to the positive orthant: (x + sqrt(x^2 + 4)) / 2.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
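
# Minimal end-to-end sketch (added for illustration; the shapes and the
# in_features value are arbitrary, not from the original module):
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=8)
    distr_args = projection(torch.randn(4, 8))  # -> (df, loc, scale), each of shape (4,)
    distribution = output.distribution(distr_args, loc=torch.zeros(4), scale=torch.ones(4))
    print(distribution.sample().shape)  # torch.Size([4])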
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
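    # Illustrative checks (added; the operands are arbitrary examples):
    assert set_bit(0b1101, 1) == 0b1111
    assert clear_bit(0b10010, 1) == 0b10000
    assert flip_bit(0b101, 1) == 0b111
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 0) == 0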
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
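
# Illustrative call (added sketch; needs scikit-learn, plus the transformers
# package context because of the relative import above; the arrays are made-up
# toy predictions, not real model output):
if __name__ == "__main__":
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    print(glue_compute_metrics("mrpc", preds, labels))  # accuracy and F1 on the toy data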
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
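    # Illustrative checks (added; the points are arbitrary examples):
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1.5, 2], [3, 0]) == 3.5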
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()

            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
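
# Minimal usage sketch (added for illustration; the coefficients are arbitrary,
# not a designed filter, and simply give y[n] = 0.5 * x[n]):
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, 0.0, 0.0], [0.5, 0.0, 0.0])
    print([filt.process(x) for x in (1.0, 0.0, 0.0)])  # [0.5, 0.0, 0.0]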
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
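    # Illustrative query on the example graphs above (added): E -> G -> F costs 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))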
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Blocks until all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, routing through XLA on TPU and the main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased keys) inside the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for a class, function, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a local port (default 29500) is already taken."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
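
# Illustrative use of the pure helpers above (added sketch; the module's
# relative imports mean it normally runs inside the accelerate package):
if __name__ == "__main__":
    with patch_environment(my_var="1"):
        assert os.environ["MY_VAR"] == "1"
    assert "MY_VAR" not in os.environ
    assert merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"b": 1, "c": 2}}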
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
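    # Spot check (added): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27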
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Check whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every start index at which `pattern` occurs in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
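    # Overlapping occurrences are all reported (added illustration):
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]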
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any]=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Any=None , SCREAMING_SNAKE_CASE :Tuple=3_2 , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
if vision_config is None:
_a : Dict ={}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_a : Tuple ={}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_a : List[str] ={}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_a : Any =InstructBlipVisionConfig(**SCREAMING_SNAKE_CASE )
_a : List[str] =InstructBlipQFormerConfig(**SCREAMING_SNAKE_CASE )
_a : Tuple =text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_a : int =CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE )
_a : Any =self.text_config.tie_word_embeddings
_a : Dict =self.text_config.is_encoder_decoder
_a : int =num_query_tokens
_a : Any =self.vision_config.hidden_size
_a : Optional[int] =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a : Tuple =1.0
_a : Optional[int] =0.02
@classmethod
def __UpperCAmelCase ( cls :List[Any] , SCREAMING_SNAKE_CASE :InstructBlipVisionConfig , SCREAMING_SNAKE_CASE :InstructBlipQFormerConfig , SCREAMING_SNAKE_CASE :PretrainedConfig , **SCREAMING_SNAKE_CASE :int , ) -> Tuple:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
_a : Any =copy.deepcopy(self.__dict__ )
_a : List[Any] =self.vision_config.to_dict()
_a : Dict =self.qformer_config.to_dict()
_a : List[Any] =self.text_config.to_dict()
_a : int =self.__class__.model_type
return output
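

# Illustrative sketch of the composite-config pattern used above: the parent
# config's `to_dict` deep-copies its own attributes and serializes each
# sub-config. Standalone toy analogue with hypothetical names, independent of
# `transformers`; it reuses the `copy` module already imported in this file.
class _ToySubConfig:
    def __init__(self, hidden_size=8):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class _ToyCompositeConfig:
    model_type = "toy_composite"

    def __init__(self, vision_config=None, text_config=None):
        self.vision_config = vision_config or _ToySubConfig()
        self.text_config = text_config or _ToySubConfig(hidden_size=16)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.model_type
        return output


assert _ToyCompositeConfig().to_dict()["text_config"]["hidden_size"] == 16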
| 694 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any]=7 , SCREAMING_SNAKE_CASE :Optional[Any]=3 , SCREAMING_SNAKE_CASE :Tuple=1_8 , SCREAMING_SNAKE_CASE :Any=3_0 , SCREAMING_SNAKE_CASE :List[str]=4_0_0 , SCREAMING_SNAKE_CASE :Optional[Any]=True , SCREAMING_SNAKE_CASE :Dict=None , SCREAMING_SNAKE_CASE :List[str]=True , ) -> Tuple:
'''simple docstring'''
_a : int =size if size is not None else {"""height""": 1_8, """width""": 1_8}
_a : int =parent
_a : Optional[int] =batch_size
_a : List[str] =num_channels
_a : Optional[Any] =image_size
_a : int =min_resolution
_a : str =max_resolution
_a : str =do_resize
_a : Tuple =size
_a : Tuple =do_normalize
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
@property
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
_a : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """clusters""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , """do_normalize""" ) )
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
_a : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
_a : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
_a : Dict =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , obj[key] ) )
else:
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : List[Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Any =os.path.join(SCREAMING_SNAKE_CASE , """image_processor.json""" )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE ).to_dict()
_a : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
_a : List[str] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE )
_a : str =self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE ).to_dict()
_a : Union[str, Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , SCREAMING_SNAKE_CASE )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image_a = Image.open(dataset[4]["file"])
    image_b = Image.open(dataset[5]["file"])
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
@slow
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Optional[Any] =ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_a : int =prepare_images()
# test non-batched
_a : Dict =image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
_a : Optional[int] =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , SCREAMING_SNAKE_CASE )
# test batched
_a : Dict =image_processing(SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
_a : Any =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , SCREAMING_SNAKE_CASE )
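

# Illustrative sketch: the `input_ids` checked above come from ImageGPT-style
# color quantization, where each pixel is mapped to the index of its nearest
# cluster. Standalone analogue with toy clusters (assumed behavior, not the
# library implementation).
def nearest_cluster_ids(pixels, clusters):
    # pixels: (n, 3) array in [-1, 1]; clusters: (k, 3) array of centroids
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)


_toy_clusters = np.array([[0.9, 0.7, 0.4], [-0.6, 0.0, 0.5]])
_toy_pixels = np.array([[0.8, 0.6, 0.5], [-0.5, 0.1, 0.4]])
assert nearest_cluster_ids(_toy_pixels, _toy_clusters).tolist() == [0, 1]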
| 694 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
def __init__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int]=7 , SCREAMING_SNAKE_CASE :Any=4_0_0 , SCREAMING_SNAKE_CASE :Union[str, Any]=2_0_0_0 , SCREAMING_SNAKE_CASE :List[Any]=2_4 , SCREAMING_SNAKE_CASE :str=2_4 , SCREAMING_SNAKE_CASE :str=0.0 , SCREAMING_SNAKE_CASE :str=1_6_0_0_0 , SCREAMING_SNAKE_CASE :Tuple=True , SCREAMING_SNAKE_CASE :Dict=True , ) -> List[Any]:
'''simple docstring'''
_a : str =parent
_a : List[str] =batch_size
_a : Optional[Any] =min_seq_length
_a : int =max_seq_length
_a : Optional[Any] =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : Tuple =feature_size
_a : Any =num_mel_bins
_a : List[Any] =padding_value
_a : str =sampling_rate
_a : Optional[Any] =return_attention_mask
_a : List[str] =do_normalize
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=False ) -> Union[str, Any]:
'''simple docstring'''
def _flatten(SCREAMING_SNAKE_CASE :Union[str, Any] ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE ) )
if equal_length:
_a : Union[str, Any] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : str =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : Any =[np.asarray(SCREAMING_SNAKE_CASE ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
def __UpperCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE , axis=0 ) - 1 ) < 1e-3 ) )
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_a : Dict =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_a : Tuple =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : Optional[int] =[np.asarray(SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
_a : Union[str, Any] =feature_extractor(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_a : Union[str, Any] =feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_a : Optional[int] =feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test batched
_a : Tuple =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : int =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_a : Optional[int] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : Dict =np.asarray(SCREAMING_SNAKE_CASE )
_a : List[Any] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
_a : Any =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __UpperCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
_a : Any =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : List[str] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : Tuple =["""longest""", """max_length""", """do_not_pad"""]
_a : Dict =[None, 1_6, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : Dict =feature_extractor(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE )
_a : Any =inputs.input_features
_a : List[str] =inputs.attention_mask
_a : Optional[Any] =[np.sum(SCREAMING_SNAKE_CASE ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
_a : Tuple =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : int =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : Dict =["""longest""", """max_length""", """do_not_pad"""]
_a : Optional[Any] =[None, 1_6, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : Optional[Any] =feature_extractor(
SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="""np""" , return_attention_mask=SCREAMING_SNAKE_CASE )
_a : Optional[int] =inputs.input_features
_a : Union[str, Any] =inputs.attention_mask
_a : List[Any] =[np.sum(SCREAMING_SNAKE_CASE ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
_a : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : Optional[Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : str =feature_extractor(
SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=4 , truncation=SCREAMING_SNAKE_CASE , return_tensors="""np""" , return_attention_mask=SCREAMING_SNAKE_CASE , )
_a : Union[str, Any] =inputs.input_features
_a : List[Any] =inputs.attention_mask
_a : Dict =np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : Tuple =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] =feature_extractor(
SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=4 , truncation=SCREAMING_SNAKE_CASE , return_tensors="""np""" , return_attention_mask=SCREAMING_SNAKE_CASE , )
_a : Optional[Any] =inputs.input_features
_a : List[str] =inputs.attention_mask
_a : Any =np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4) )
_a : str =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : int =feature_extractor(
SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=1_6 , truncation=SCREAMING_SNAKE_CASE , return_tensors="""np""" , return_attention_mask=SCREAMING_SNAKE_CASE , )
_a : List[Any] =inputs.input_features
_a : Dict =inputs.attention_mask
_a : Any =np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 2_4) )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
import torch
_a : Dict =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0, 3_2).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_a : Tuple =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32)
_a : str =feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[int] ) -> List[Any]:
'''simple docstring'''
from datasets import load_dataset
_a : str =load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_a : Optional[int] =ds.sort("""id""" ).select(range(SCREAMING_SNAKE_CASE ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
# fmt: off
_a : List[Any] =np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
_a : Tuple =self._load_datasamples(1 )
_a : int =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_a : List[str] =feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape, (1, 5_8_4, 2_4))
self.assertTrue(np.allclose(input_features[0, 0, :3_0] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
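

# Illustrative sketch: `_check_zero_mean_unit_variance` above asserts per-feature
# normalization over time. A minimal standalone version of that normalization
# (assumed behavior, not the library implementation):
def _utterance_cmvn(features):
    return (features - features.mean(axis=0)) / np.sqrt(features.var(axis=0) + 1e-10)


_rng = np.random.default_rng(0)
_normalized = _utterance_cmvn(_rng.normal(3.0, 2.0, size=(100, 24)))
assert np.all(np.abs(_normalized.mean(axis=0)) < 1e-3)
assert np.all(np.abs(_normalized.var(axis=0) - 1) < 1e-3)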
| 694 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element,
    # hence True
    for i in range(arr_len + 1):
        subset[i][0] = True
    # if the sum is not zero and the set is empty, the sum cannot be formed
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
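

# Illustrative usage sketch of the DP above (hedged examples, not original
# doctests; the expected answers were checked by hand):
if __name__ == "__main__":
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)  # 4 + 5
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)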
| 694 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A__: Tuple = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
def __init__( self :List[str] , SCREAMING_SNAKE_CASE :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> List[Any]:
'''simple docstring'''
super().__init__()
_a : Any =nn.ModuleList(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :torch.FloatTensor , SCREAMING_SNAKE_CASE :Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE :torch.Tensor , SCREAMING_SNAKE_CASE :List[torch.tensor] , SCREAMING_SNAKE_CASE :List[float] , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.nets ) ):
_a , _a : int =controlnet(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
# merge samples
if i == 0:
_a , _a : Tuple =down_samples, mid_sample
else:
_a : Any =[
samples_prev + samples_curr
for samples_prev, samples_curr in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, os.PathLike] , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Callable = None , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :Optional[str] = None , ) -> int:
'''simple docstring'''
_a : Tuple =0
_a : Optional[Any] =save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
SCREAMING_SNAKE_CASE , is_main_process=SCREAMING_SNAKE_CASE , save_function=SCREAMING_SNAKE_CASE , safe_serialization=SCREAMING_SNAKE_CASE , variant=SCREAMING_SNAKE_CASE , )
idx += 1
_a : Optional[int] =model_path_to_save + f"_{idx}"
@classmethod
def __UpperCAmelCase ( cls :List[str] , SCREAMING_SNAKE_CASE :Optional[Union[str, os.PathLike]] , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> str:
'''simple docstring'''
_a : List[str] =0
_a : List[str] =[]
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_a : str =pretrained_model_path
while os.path.isdir(SCREAMING_SNAKE_CASE ):
_a : List[str] =ControlNetModel.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
controlnets.append(SCREAMING_SNAKE_CASE )
idx += 1
_a : Optional[Any] =pretrained_model_path + f"_{idx}"
logger.info(f"{len(SCREAMING_SNAKE_CASE )} controlnets loaded from {pretrained_model_path}." )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(SCREAMING_SNAKE_CASE )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(SCREAMING_SNAKE_CASE )
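

# Illustrative sketch: the forward pass above accumulates per-ControlNet
# residuals elementwise. Minimal standalone analogue using plain floats as a
# stand-in for tensors (`_merge_residuals` is a hypothetical helper):
def _merge_residuals(all_down_samples):
    merged = list(all_down_samples[0])
    for down_samples in all_down_samples[1:]:
        merged = [prev + curr for prev, curr in zip(merged, down_samples)]
    return merged


assert _merge_residuals([[1.0, 2.0], [0.5, 0.5]]) == [1.5, 2.5]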
| 694 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
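

# Illustrative sketch: every third Fibonacci number is even, so the same sum
# can be accumulated three terms at a time via E(k) = 4 * E(k - 1) + E(k - 2).
# Hedged alternative (`solution_every_third` is a hypothetical name):
def solution_every_third(n: int = 4000000) -> int:
    total, a, b = 0, 0, 2  # b starts at F(3) = 2, the first even term
    while b <= n:
        total += b
        a, b = b, 4 * b + a
    return total


if __name__ == "__main__":
    assert solution_every_third() == solution()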
| 694 | 1 |
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
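

# Illustrative sketch: a memoized recursive cross-check of the DP table's
# length result (`lcs_length_recursive` is a hypothetical helper, not part of
# the original file):
from functools import lru_cache


def lcs_length_recursive(x: str, y: str) -> int:
    @lru_cache(maxsize=None)
    def go(i: int, j: int) -> int:
        if i == 0 or j == 0:
            return 0
        if x[i - 1] == y[j - 1]:
            return go(i - 1, j - 1) + 1
        return max(go(i - 1, j), go(i, j - 1))

    return go(len(x), len(y))


assert lcs_length_recursive("AGGTAB", "GXTXAYB") == 4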
| 694 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
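

# Illustrative sketch: by Binet's formula F(n) ~ phi**n / sqrt(5), so the digit
# count of F(n) is floor(n * log10(phi) - log10(5) / 2) + 1 for n >= 2. This
# gives a fast cross-check of the iterative search above (hypothetical helper,
# assumed valid only for n >= 2):
import math


def fibonacci_digits_index_closed_form(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    index = 2
    while math.floor(index * math.log10(phi) - math.log10(5) / 2) + 1 < n:
        index += 1
    return index


if __name__ == "__main__":
    assert fibonacci_digits_index_closed_form(3) == fibonacci_digits_index(3)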
| 694 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path: str, case: str) -> None:
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
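

# Illustrative sketch: a minimal AST-based analogue of what `get_imports`-style
# parsing does. Simplified on purpose: it lists every imported top-level module
# and does not replicate the try/except filtering exercised by the cases above.
import ast


def list_imports(source: str) -> list:
    modules = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            modules.extend(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            modules.append(node.module.split(".")[0])
    return sorted(set(modules))


assert list_imports("import os\nfrom collections import Counter") == ["collections", "os"]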
| 694 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, rembert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A__: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__: Tuple = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
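

# Illustrative usage sketch (hypothetical paths, shown for orientation only):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin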
| 694 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
A__: Any = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> Dict:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : List[Any] =[label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self :List[Any] , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] ) -> int:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(SCREAMING_SNAKE_CASE ) )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : Optional[Any] =[sequences]
_a : Tuple =[]
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(SCREAMING_SNAKE_CASE )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int]=ZeroShotClassificationArgumentHandler() , *SCREAMING_SNAKE_CASE :Dict , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Dict:
'''simple docstring'''
_a : Tuple =args_parser
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :Optional[int]=TruncationStrategy.ONLY_FIRST , **SCREAMING_SNAKE_CASE :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : str =self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
_a : Union[str, Any] =self.tokenizer.eos_token
try:
_a : Any =self.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , )
except Exception as e:
if "too short" in str(SCREAMING_SNAKE_CASE ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_a : List[Any] =self.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __UpperCAmelCase ( self :Tuple , **SCREAMING_SNAKE_CASE :Tuple ) -> List[Any]:
'''simple docstring'''
if kwargs.get("""multi_class""" , SCREAMING_SNAKE_CASE ) is not None:
_a : int =kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
_a : Tuple ={}
if "candidate_labels" in kwargs:
_a : Any =self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
_a : Optional[Any] =kwargs["""hypothesis_template"""]
_a : List[str] ={}
if "multi_label" in kwargs:
_a : Union[str, Any] =kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, List[str]] , *SCREAMING_SNAKE_CASE :Optional[Any] , **SCREAMING_SNAKE_CASE :int , ) -> List[str]:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) == 0:
pass
elif len(SCREAMING_SNAKE_CASE ) == 1 and "candidate_labels" not in kwargs:
_a : int =args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}" )
return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :str=None , SCREAMING_SNAKE_CASE :Optional[Any]="This example is {}." ) -> Dict:
'''simple docstring'''
_a , _a : str =self._args_parser(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i, (candidate_label, sequence_pair) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
_a : Union[str, Any] =self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(SCREAMING_SNAKE_CASE ) - 1,
**model_input,
}
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> int:
'''simple docstring'''
_a : List[Any] =inputs["""candidate_label"""]
_a : Dict =inputs["""sequence"""]
_a : Optional[Any] ={k: inputs[k] for k in self.tokenizer.model_input_names}
_a : Dict =self.model(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] ={
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Optional[int]=False ) -> Union[str, Any]:
'''simple docstring'''
_a : Optional[Any] =[outputs["""candidate_label"""] for outputs in model_outputs]
_a : Optional[int] =[outputs["""sequence"""] for outputs in model_outputs]
_a : Any =np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
_a : int =logits.shape[0]
_a : Union[str, Any] =len(SCREAMING_SNAKE_CASE )
_a : List[Any] =N // n
_a : Optional[int] =logits.reshape((num_sequences, n, -1) )
if multi_label or len(SCREAMING_SNAKE_CASE ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_a : Optional[Any] =self.entailment_id
_a : Dict =-1 if entailment_id == 0 else 0
_a : Union[str, Any] =reshaped_outputs[..., [contradiction_id, entailment_id]]
_a : Optional[int] =np.exp(SCREAMING_SNAKE_CASE ) / np.exp(SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=SCREAMING_SNAKE_CASE )
_a : int =scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_a : str =reshaped_outputs[..., self.entailment_id]
_a : Union[str, Any] =np.exp(SCREAMING_SNAKE_CASE ) / np.exp(SCREAMING_SNAKE_CASE ).sum(-1 , keepdims=SCREAMING_SNAKE_CASE )
_a : List[Any] =list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
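

# Illustrative sketch: the multi-label branch above softmaxes each label's
# [contradiction, entailment] logit pair independently. Standalone numpy
# analogue with toy logits (`_entailment_scores` is a hypothetical helper):
def _entailment_scores(logits, entailment_id, contradiction_id):
    pair = logits[..., [contradiction_id, entailment_id]]
    probs = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
    return probs[..., 1]  # probability mass on "entailment" per label


_toy_logits = np.array([[0.1, 0.0, 2.0], [1.5, 0.0, -1.0]])  # (2 labels, 3 NLI classes)
_scores = _entailment_scores(_toy_logits, entailment_id=2, contradiction_id=0)
assert _scores[0] > _scores[1]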
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Union[str, Any] = '''▁'''
A__: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
A__: Optional[int] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
A__: Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Any="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :int="</s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE :Tuple="<unk>" , SCREAMING_SNAKE_CASE :Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :List[Any] , ) -> None:
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
_a : str =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : int ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : Dict =vocab_file
_a : int =monolingual_vocab_file
_a : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : List[Any] ={}
_a : List[str] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[Any] =cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_a : int =line.strip().split()[0]
_a : str =len(self.fairseq_tokens_to_ids )
if str(SCREAMING_SNAKE_CASE ) not in self.fairseq_tokens_to_ids:
_a : Optional[int] =len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :int ) -> List[Any]:
'''simple docstring'''
_a : Optional[int] =self.__dict__.copy()
_a : Optional[Any] =None
_a : str =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> str:
'''simple docstring'''
_a : List[str] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple ={}
_a : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
_a : int =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : List[str] =[self.sep_token_id]
_a : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
_a : str ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Dict ) -> Any:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_a : str ="""""".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Any =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
SCREAMING_SNAKE_CASE ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(SCREAMING_SNAKE_CASE )} \n" )
return out_vocab_file, out_monolingual_vocab_file
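

# Illustrative sketch: the `<s> A </s></s> B </s>` pair layout produced by
# `build_inputs_with_special_tokens` above, as a standalone function with toy
# ids (`_build_pair` is a hypothetical helper):
def _build_pair(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]


assert _build_pair(0, 2, [10, 11], [12]) == [0, 10, 11, 2, 2, 12, 2]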
| 694 | 1 |