| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 82 to 53.2k) | int64 (0 to 721) | string (length 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
""" Data2VecAudio model configuration"""
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class for a Data2VecAudio model."""

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
| 42 |
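For orientation, a minimal usage sketch for this config class; it assumes the public `transformers` exports (`Data2VecAudioConfig`, `Data2VecAudioModel`) that correspond to this file.

```python
# Minimal usage sketch; assumes the public transformers exports for this file.
from transformers import Data2VecAudioConfig, Data2VecAudioModel

config = Data2VecAudioConfig(hidden_size=256, num_hidden_layers=4)  # override a few defaults
model = Data2VecAudioModel(config)  # randomly initialised, not pretrained

# The property at the end of the class is the feature extractor's total stride:
# prod((5, 2, 2, 2, 2, 2, 2)) == 320 raw audio samples per output frame.
print(config.inputs_to_logits_ratio)  # 320
```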
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word's index overflows, delete the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows, delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 225 | 0 |
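For reference, a small usage sketch; `functools.cache` keeps one entry per `(index1, index2)` pair, so the recursion runs in O(len(word1) * len(word2)) time and space.

```python
# Usage sketch for the memoised edit-distance above.
print(min_distance_up_bottom("intention", "execution"))  # 5
print(min_distance_up_bottom("kitten", "sitting"))       # 3
```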
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
lowercase_ = """encodec"""
def __init__( self , lowercase__=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase__=2_4_0_0_0 , lowercase__=1 , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__=1_2_8 , lowercase__=3_2 , lowercase__=1 , lowercase__=[8, 5, 4, 2] , lowercase__="weight_norm" , lowercase__=7 , lowercase__=7 , lowercase__=3 , lowercase__=2 , lowercase__=True , lowercase__="reflect" , lowercase__=2 , lowercase__=2 , lowercase__=1.0 , lowercase__=1_0_2_4 , lowercase__=None , lowercase__=True , **lowercase__ , ):
'''simple docstring'''
__A =target_bandwidths
__A =sampling_rate
__A =audio_channels
__A =normalize
__A =chunk_length_s
__A =overlap
__A =hidden_size
__A =num_filters
__A =num_residual_layers
__A =upsampling_ratios
__A =norm_type
__A =kernel_size
__A =last_kernel_size
__A =residual_kernel_size
__A =dilation_growth_rate
__A =use_causal_conv
__A =pad_mode
__A =compress
__A =num_lstm_layers
__A =trim_right_ratio
__A =codebook_size
__A =codebook_dim if codebook_dim is not None else hidden_size
__A =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**lowercase__ )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
| 702 |
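The row above is the EnCodec model configuration with its identifiers rewritten; the three properties at the end derive chunk length, chunk stride, and frame rate. A small sketch of that arithmetic, assuming the defaults visible in the signature (24 kHz, `upsampling_ratios=[8, 5, 4, 2]`):

```python
# Sketch of the derived quantities computed by the properties above.
import math

import numpy as np

sampling_rate = 24_000
upsampling_ratios = [8, 5, 4, 2]
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]  # kbit/s

hop_length = np.prod(upsampling_ratios)             # 320 samples per codec frame
frame_rate = math.ceil(sampling_rate / hop_length)  # 75 frames per second

# last property: quantizers needed to reach the top target bandwidth
num_quantizers = int(1_000 * target_bandwidths[-1] // (frame_rate * 10))  # 32
print(hop_length, frame_rate, num_quantizers)
```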
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def A__ ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] , __A : List[str] , __A : Union[str, Any] ) ->str:
for attribute in key.split('''.''' ):
__A =getattr(__A , __A )
if weight_type is not None:
__A =getattr(__A , __A ).shape
else:
__A =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__A =value
elif weight_type == "weight_g":
__A =value
elif weight_type == "weight_v":
__A =value
elif weight_type == "bias":
__A =value
else:
__A =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A__ ( __A : int , __A : str ) ->List[str]:
__A =[]
__A =fairseq_model.state_dict()
__A =hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__A =None
for name, value in fairseq_dict.items():
__A =False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , )
__A =True
elif name.split('''.''' )[0] == "proj":
__A =fairseq_model.proj
__A =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__A =True
if "*" in mapped_key:
__A =name.split(__A )[0].split('''.''' )[-2]
__A =mapped_key.replace('''*''' , __A )
if "weight_g" in name:
__A ='''weight_g'''
elif "weight_v" in name:
__A ='''weight_v'''
elif "bias" in name:
__A ='''bias'''
elif "weight" in name:
__A ='''weight'''
else:
__A =None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def A__ ( __A : str , __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : str ) ->Optional[Any]:
__A =full_name.split('''conv_layers.''' )[-1]
__A =name.split('''.''' )
__A =int(items[0] )
__A =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__A =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__A )
def A__ ( __A : Optional[Any] ) ->List[Any]:
__A , __A =emb.weight.shape
__A =nn.Linear(__A , __A , bias=__A )
__A =emb.weight.data
return lin_layer
def A__ ( __A : Dict ) ->Optional[int]:
with open(__A , '''r''' , encoding='''utf-8''' ) as f:
__A =f.readlines()
__A =[line.split(''' ''' )[0] for line in lines]
__A =len(__A )
__A ={
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def A__ ( __A : List[Any] , __A : Optional[Any] , __A : Tuple , __A : int , __A : str , __A : str , __A : Dict , ) ->Tuple:
__A =WavaVecaConfig.from_pretrained(__A )
__A =SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
__A =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
__A , __A , __A =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__A =model[0].eval()
# set weights for wav2vec2 encoder
__A =WavaVecaModel(__A )
__A =recursively_load_weights_wavaveca(model.encoder , __A )
__A =SpeechaTextaForCausalLM(__A )
__A , __A =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
__A =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__A =SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
__A =False
# add projection layer
__A =nn.Parameter(projection_layer.weight )
__A =nn.Parameter(projection_layer.bias )
__A =create_vocab_dict(__A )
with open(os.path.join(__A , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__A , __A )
__A =SpeechaTextaTokenizer(os.path.join(__A , '''vocab.json''' ) )
tokenizer.save_pretrained(__A )
__A =hf_wavavec.config.to_dict()
__A =tokenizer.pad_token_id
__A =tokenizer.bos_token_id
__A =tokenizer.eos_token_id
__A ='''speech_to_text_2'''
__A ='''wav2vec2'''
__A =SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCamelCase : Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 516 | 0 |
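In the checkpoint-conversion loop above, `MAPPING` keys paired with a `*` in the target name stand for a layer index that is recovered from the fairseq parameter name. A small sketch with a hypothetical parameter name; the split logic mirrors the loop above:

```python
# Sketch: resolving the "*" layer wildcard the way the conversion loop does.
name = "w2v_encoder.w2v_model.encoder.layers.3.self_attn.k_proj.weight"  # hypothetical
key = "self_attn.k_proj"
mapped_key = "encoder.layers.*.attention.k_proj"

if key in name:
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    hf_name = mapped_key.replace("*", layer_index)   # -> "encoder.layers.3.attention.k_proj"
    print(hf_name)
```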
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
args = parser.parse_args()
if args.framework == "onnx":
    onnx_compliancy(args.saved_model_path, args.strict, args.opset)

| 45 |
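A possible direct invocation of the checker, sketched in Python; the saved-model path below is a placeholder:

```python
# Equivalent to: python <this_script>.py --saved_model_path <path> --opset 12
onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)
```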
"""simple docstring"""
a : str = range(2, 20 + 1)
a : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
a : dict[int, dict[int, list[list[int]]]] = {}
def lowercase__(A , A , A , A ) ->Any:
"""simple docstring"""
lowercase__ : str= sum(a_i[j] for j in range(A , len(A ) ) )
lowercase__ : int= sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) )
lowercase__, lowercase__ : Optional[Any]= 0, 0
lowercase__ : Any= n - i
lowercase__ : Union[str, Any]= memo.get(A )
if sub_memo is not None:
lowercase__ : List[str]= sub_memo.get(A )
if jumps is not None and len(A ) > 0:
# find and make the largest jump without going over
lowercase__ : List[str]= -1
for _k in range(len(A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase__ : Any= _k
break
if max_jump >= 0:
lowercase__, lowercase__, lowercase__ : str= jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase__ : List[Any]= diff + c
for j in range(min(A , len(A ) ) ):
lowercase__, lowercase__ : Union[str, Any]= divmod(A , 10 )
if new_c > 0:
add(A , A , A )
else:
lowercase__ : Any= []
else:
lowercase__ : List[str]= {c: []}
lowercase__ : Union[str, Any]= sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase__, lowercase__ : Optional[int]= next_term(A , k - 1 , i + dn , A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase__, lowercase__ : str= compute(A , A , i + dn , A )
diff += _diff
dn += terms_jumped
lowercase__ : Optional[Any]= sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase__ : Dict= 0
while j < len(A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A , (diff, dn, k) )
return (diff, dn)
def lowercase__(A , A , A , A ) ->Optional[Any]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(A ):
a_i.extend([0 for _ in range(k - len(A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase__ : int= i
lowercase__, lowercase__, lowercase__ : Union[str, Any]= 0, 0, 0
for j in range(len(A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase__ : Tuple= ds_c + ds_b
diff += addend
lowercase__ : List[Any]= 0
for j in range(A ):
lowercase__ : int= a_i[j] + addend
lowercase__, lowercase__ : Any= divmod(A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A , A , A )
return diff, i - start_i
def lowercase__(A , A , A ) ->Any:
"""simple docstring"""
for j in range(A , len(A ) ):
lowercase__ : List[str]= digits[j] + addend
if s >= 10:
lowercase__, lowercase__ : str= divmod(A , 10 )
lowercase__ : Optional[int]= addend // 10 + quotient
else:
lowercase__ : int= s
lowercase__ : Union[str, Any]= addend // 10
if addend == 0:
break
while addend > 0:
lowercase__, lowercase__ : str= divmod(A , 10 )
digits.append(A )
def lowercase__(A = 10**15 ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= [1]
lowercase__ : Dict= 1
lowercase__ : List[Any]= 0
while True:
lowercase__, lowercase__ : List[str]= next_term(A , 20 , i + dn , A )
dn += terms_jumped
if dn == n - i:
break
lowercase__ : int= 0
for j in range(len(A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 218 | 0 |
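The row above is a memoised digit-jumping solution to Project Euler 551 with rewritten identifiers, so the jump logic is hard to verify by eye. A brute-force reference for the same sequence makes small cases checkable; a(10) = 62 matches the problem statement:

```python
# Brute-force reference for the sequence the memoised solution accelerates:
# a(1) = 1, a(n + 1) = a(n) + digit_sum(a(n)).  Only viable for small n.
def brute_force_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a

print(brute_force_a(6))   # 23  (1, 2, 4, 8, 16, 23)
print(brute_force_a(10))  # 62, matching the Project Euler 551 statement
```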
"""base85 (b85) encode/decode helpers built on the standard library."""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base85 bytes.

    >>> base85_decode(base85_encode("Hello World!"))
    'Hello World!'
    """
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode base85 bytes back to a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 715 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Optional[int] =CTRLTokenizer
__lowerCAmelCase : int =False
__lowerCAmelCase : Union[str, Any] =False
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase =['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_lowercase =dict(zip(snake_case, range(len(snake_case))))
_lowercase =['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_lowercase ={'unk_token': '<unk>'}
_lowercase =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
_lowercase =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(snake_case) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
fp.write('\n'.join(snake_case))
def UpperCamelCase__ ( self :Union[str, Any], **snake_case :Optional[Any]):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname, **snake_case)
def UpperCamelCase__ ( self :Tuple, snake_case :str):
"""simple docstring"""
_lowercase ='adapt react readapt apt'
_lowercase ='adapt react readapt apt'
return input_text, output_text
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
_lowercase ='adapt react readapt apt'
_lowercase ='adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_lowercase =tokenizer.tokenize(snake_case)
self.assertListEqual(snake_case, snake_case)
_lowercase =tokens + [tokenizer.unk_token]
_lowercase =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case), snake_case)
| 557 | 0 |
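The toy vocab and merges in the test above define a miniature BPE. A comment-only walkthrough of why the expected tokenization comes out as it does, assuming standard greedy BPE (merges apply lowest rank first; `</w>` marks end of word, `@@` marks a non-final subword in CTRL's convention):

```python
# "adapt": a d a p t</w>
#          -> a d ap t</w>       merge "a p"        (rank 0)
#          -> a d apt</w>        merge "ap t</w>"   (rank 1)
#          -> ad apt</w>         merge "a d"        (rank 3)
#          -> adapt</w>          merge "ad apt</w>" (rank 4)   => token "adapt"
#
# "react": r e a c t</w>
#          -> re a c t</w>       merge "r e"        (rank 2)   => "re@@ a@@ c@@ t"
```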
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = BlenderbotSmallTokenizer
__lowerCamelCase : List[str] = False
def a__ (self ) -> Tuple:
"""simple docstring"""
super().setUp()
_a = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_a = dict(zip(A , range(len(A ) ) ) )
_a = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_a = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def a__ (self , **A ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **A )
def a__ (self , A ) -> Union[str, Any]:
"""simple docstring"""
_a = '''adapt act apte'''
_a = '''adapt act apte'''
return input_text, output_text
def a__ (self ) -> Any:
"""simple docstring"""
_a = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''adapt act apte'''
_a = ['''adapt''', '''act''', '''ap@@''', '''te''']
_a = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_a = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_384]
_a = '''I am a small frog.'''
_a = tok([src_text] , padding=A , truncation=A )['''input_ids''']
_a = tok.batch_decode(A , skip_special_tokens=A , clean_up_tokenization_spaces=A )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a__ (self ) -> int:
"""simple docstring"""
_a = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_a = '''I am a small frog .'''
_a = '''.'''
_a = tok(A )['''input_ids''']
_a = tok(A )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 11 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : List[str] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a ( _lowerCamelCase ):
snake_case_ = "wavlm"
def __init__( self : Union[str, Any] , lowercase_ : Union[str, Any]=32 , lowercase_ : Optional[int]=768 , lowercase_ : List[str]=12 , lowercase_ : Dict=12 , lowercase_ : Any=3072 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Dict=0.1 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.02 , lowercase_ : Dict=1e-5 , lowercase_ : Tuple="group" , lowercase_ : str="gelu" , lowercase_ : Any=(512, 512, 512, 512, 512, 512, 512) , lowercase_ : str=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : Any=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : Tuple=False , lowercase_ : List[Any]=128 , lowercase_ : int=16 , lowercase_ : Tuple=320 , lowercase_ : Union[str, Any]=800 , lowercase_ : List[str]=False , lowercase_ : str=True , lowercase_ : List[Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=320 , lowercase_ : Optional[Any]=2 , lowercase_ : List[str]=0.1 , lowercase_ : int=100 , lowercase_ : Tuple=256 , lowercase_ : Tuple=256 , lowercase_ : Dict=0.1 , lowercase_ : Any="mean" , lowercase_ : Tuple=False , lowercase_ : Dict=False , lowercase_ : str=256 , lowercase_ : Optional[int]=(512, 512, 512, 512, 1500) , lowercase_ : List[str]=(5, 3, 3, 1, 1) , lowercase_ : Any=(1, 2, 3, 1, 1) , lowercase_ : int=512 , lowercase_ : List[Any]=80 , lowercase_ : Optional[Any]=0 , lowercase_ : int=1 , lowercase_ : List[str]=2 , lowercase_ : List[str]=False , lowercase_ : Dict=3 , lowercase_ : List[Any]=2 , lowercase_ : Optional[Any]=3 , lowercase_ : int=None , **lowercase_ : List[str] , ):
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(lowercase_ )
snake_case_ = list(lowercase_ )
snake_case_ = list(lowercase_ )
snake_case_ = conv_bias
snake_case_ = num_buckets
snake_case_ = max_bucket_distance
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = num_ctc_classes
snake_case_ = vocab_size
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
snake_case_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# adapter
snake_case_ = add_adapter
snake_case_ = adapter_kernel_size
snake_case_ = adapter_stride
snake_case_ = num_adapter_layers
snake_case_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(lowercase_ )
snake_case_ = list(lowercase_ )
snake_case_ = list(lowercase_ )
snake_case_ = xvector_output_dim
@property
def A_ ( self : Tuple ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 640 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
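`_LazyModule` defers the heavy framework imports above until an attribute is actually requested. A self-contained sketch of the same idea using PEP 562 module `__getattr__`; the mapping below is illustrative, not the transformers implementation:

```python
# Minimal lazy-import sketch (PEP 562): resolve attributes on first access.
import importlib
from typing import Any

_LAZY_ATTRS = {
    "VisionEncoderDecoderModel": ".modeling_vision_encoder_decoder",  # attr -> submodule
}

def __getattr__(name: str) -> Any:
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```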
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 546 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : List[Any] = "altclip_text_model"
def __init__( self , _A=2_5_0_0_0_2 , _A=1_0_2_4 , _A=2_4 , _A=1_6 , _A=4_0_9_6 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_4 , _A=1 , _A=0.02 , _A=0.02 , _A=1E-05 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , _A=7_6_8 , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =initializer_factor
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =position_embedding_type
_SCREAMING_SNAKE_CASE =use_cache
_SCREAMING_SNAKE_CASE =project_dim
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : List[str] = "altclip_vision_model"
def __init__( self , _A=7_6_8 , _A=3_0_7_2 , _A=5_1_2 , _A=1_2 , _A=1_2 , _A=3 , _A=2_2_4 , _A=3_2 , _A="quick_gelu" , _A=1E-5 , _A=0.0 , _A=0.02 , _A=1.0 , **_A , ):
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =projection_dim
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =initializer_factor
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =hidden_act
@classmethod
def UpperCamelCase_ ( cls , _A , **_A ):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_SCREAMING_SNAKE_CASE =cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
_SCREAMING_SNAKE_CASE =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : List[Any] = "altclip"
lowercase : Optional[int] = True
def __init__( self , _A=None , _A=None , _A=7_6_8 , _A=2.6592 , **_A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =kwargs.pop('''text_config_dict''' , UpperCamelCase_ )
_SCREAMING_SNAKE_CASE =kwargs.pop('''vision_config_dict''' , UpperCamelCase_ )
super().__init__(**UpperCamelCase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_SCREAMING_SNAKE_CASE ={}
# This is the complete result when using `text_config_dict`.
_SCREAMING_SNAKE_CASE =AltCLIPTextConfig(**UpperCamelCase_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_SCREAMING_SNAKE_CASE =(
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
_SCREAMING_SNAKE_CASE =(
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(UpperCamelCase_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_SCREAMING_SNAKE_CASE ={}
# This is the complete result when using `vision_config_dict`.
_SCREAMING_SNAKE_CASE =AltCLIPVisionConfig(**UpperCamelCase_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_SCREAMING_SNAKE_CASE ={
str(UpperCamelCase_ ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_SCREAMING_SNAKE_CASE =(
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
_SCREAMING_SNAKE_CASE =(
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(UpperCamelCase_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_SCREAMING_SNAKE_CASE ={}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
_SCREAMING_SNAKE_CASE ={}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
_SCREAMING_SNAKE_CASE =AltCLIPTextConfig(**UpperCamelCase_ )
_SCREAMING_SNAKE_CASE =AltCLIPVisionConfig(**UpperCamelCase_ )
_SCREAMING_SNAKE_CASE =projection_dim
_SCREAMING_SNAKE_CASE =logit_scale_init_value
_SCREAMING_SNAKE_CASE =1.0
@classmethod
def UpperCamelCase_ ( cls , _A , _A , **_A ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase_ )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.text_config.to_dict()
_SCREAMING_SNAKE_CASE =self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output
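The composite class above stitches a text and a vision config together and reconciles any `*_config_dict` overrides. A usage sketch, assuming the upstream transformers names this rewritten sample corresponds to (`AltCLIPConfig` and friends):

```python
# Sketch: building the composite config from its two halves.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(hidden_size=1024, num_hidden_layers=24)
vision_config = AltCLIPVisionConfig(image_size=224, patch_size=32)

config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=768)
# to_dict() re-serialises the nested configs, mirroring the method at the end of the class
print(sorted(config.to_dict().keys())[:3])
```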
| 255 |

'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class __A (__magic_name__ ):
snake_case :List[str] = "git_vision_model"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3 , UpperCamelCase_=2_24 , UpperCamelCase_=16 , UpperCamelCase_="quick_gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Union[str, Any] = patch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : List[str] = hidden_act
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
cls._set_token_in_kwargs(UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
__UpperCAmelCase : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class __A (__magic_name__ ):
snake_case :Optional[int] = "git"
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=6 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10_24 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=1_01 , UpperCamelCase_=1_02 , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
if vision_config is None:
__UpperCAmelCase : str = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
__UpperCAmelCase : int = GitVisionConfig(**UpperCamelCase_ )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : Tuple = tie_word_embeddings
__UpperCAmelCase : Optional[int] = num_image_with_embedding
__UpperCAmelCase : Tuple = bos_token_id
__UpperCAmelCase : List[str] = eos_token_id
def _snake_case ( self ):
__UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Union[str, Any] = self.vision_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
| 168 | 0 |
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test.

    Each random round wrongly reports a composite as prime with probability
    at most 1/4, so the overall error is at most 4**-prec.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 720 |
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image around its mean pixel value (in place)."""
    width, height = image.size
    pixels = image.load()

    # first pass: mean intensity over all pixels
    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    # second pass: threshold every pixel against the mean
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 585 | 0 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
| 100 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Union[str, Any] =do_rescale
lowercase : List[Any] =rescale_factor
lowercase : Tuple =do_pad
lowercase : List[str] =pad_size
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ )
lowercase : Tuple =(old_height // size + 1) * size - old_height
lowercase : Tuple =(old_width // size + 1) * size - old_width
return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase : int =do_rescale if do_rescale is not None else self.do_rescale
lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : int =do_pad if do_pad is not None else self.do_pad
lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_pad:
lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 92 | 0 |
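The `pad` step above grows each spatial dimension to the next multiple of `pad_size`; note that the `// size + 1` form adds a full extra window even when a dimension is already aligned. A quick numeric sketch:

```python
# Numeric sketch of the padding rule used by the image processor above.
def pad_amounts(old_height: int, old_width: int, size: int) -> tuple:
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width

print(pad_amounts(60, 61, 8))  # (4, 3): padded up to 64 x 64
print(pad_amounts(64, 64, 8))  # (8, 8): aligned dims still gain one extra window
```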
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( __lowerCamelCase,unittest.TestCase ):
_lowercase : Optional[Any] = KandinskyVaaInpaintPipeline
_lowercase : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_lowercase : List[Any] = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_lowercase : List[Any] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_lowercase : int = False
@property
def UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
return 3_2
@property
def UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
return 3_2
@property
def UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return 1_0_0
@property
def UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
A_ = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
A_ = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
A_ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
A_ = self.dummy_unet
A_ = self.dummy_movq
A_ = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowerCamelCase__ , )
A_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCamelCase ( self : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create init_image
A_ = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
A_ = np.ones((6_4, 6_4) , dtype=np.floataa )
A_ = 0
if str(lowerCamelCase__ ).startswith('''mps''' ):
A_ = torch.manual_seed(lowerCamelCase__ )
else:
A_ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
A_ = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_inpaint(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"image.shape {image.shape}" )
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        mask = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
| 716 |
import datasets
from .evaluate import evaluate
__lowercase = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
__lowercase = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
__lowercase = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self , predictions , references ):
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
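    # For reference, `_compute` above reshapes the flat `references` list into the
    # nested SQuAD-style layout that the official CUAD `evaluate` script expects.
    # A minimal sketch of the two structures, with a made-up id and answer text
    # used purely for illustration:
    #
    #   pred_dict = {"qa_0": ["The seller:"]}
    #   dataset = [{"paragraphs": [{"qas": [
    #       {"answers": [{"text": "The seller:"}], "id": "qa_0"},
    #   ]}]}]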
| 563 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _lowerCamelCase( FlaxModelTesterMixin, unittest.TestCase ):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 89 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
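# Written out, the two helpers above are the standard logistic-regression pieces
# (this is a restatement of the code, not an addition to it):
#   sigmoid:        h = 1 / (1 + exp(-z))
#   cross-entropy:  J(theta) = -(1/m) * sum(y * log(h) + (1 - y) * log(1 - h))
# The gradient of J, used by `logistic_reg` below, is X^T (h - y) / m, which is
# exactly the `np.dot(x.T, h - y) / y.size` term in the update step.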
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
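# Minimal usage sketch on made-up data (values are illustrative only):
#
#   x_toy = np.array([[0.5, 1.5], [1.0, 1.0], [1.5, 0.5], [3.0, 0.5]])
#   y_toy = np.array([0, 0, 1, 1])
#   w = logistic_reg(0.1, x_toy, y_toy, max_iterations=1000)
#   preds = sigmoid_function(np.dot(x_toy, w)) > 0.5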
# In[68]:
if __name__ == "__main__":
_lowercase : List[str] = datasets.load_iris()
_lowercase : List[Any] = iris.data[:, :2]
_lowercase : Union[str, Any] = (iris.target != 0) * 1
_lowercase : Any = 0.1
_lowercase : Optional[Any] = logistic_reg(alpha, x, y, max_iterations=70000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return sigmoid_function(
np.dot(A , A ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((_lowercase) , (_lowercase)) : Optional[Any] = (x[:, 0].min(), x[:, 0].max())
((_lowercase) , (_lowercase)) : Any = (x[:, 1].min(), x[:, 1].max())
((_lowercase) , (_lowercase)) : Any = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowercase : Any = np.c_[xxa.ravel(), xxa.ravel()]
_lowercase : Dict = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 210 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
        return image_processor
@property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs )
return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0 )
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 706 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer ):
    def __init__(self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self , calib_dataset=None ):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset." )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset , description="Calibration" )
        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=False , )
    def calibrate(self , calib_dataset=None ):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info("***** Running calibration *****" )
        logger.info(f"  Num examples = {self.calib_num}" )
        logger.info(f"  Batch size = {calib_dataloader.batch_size}" )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=True )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
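    # A sketch of how this trainer is typically driven; the surrounding setup
    # (model, args, datasets) is assumed and not part of this file:
    #
    #   trainer = QuestionAnsweringTrainer(model=model, args=training_args,
    #                                      train_dataset=train_dataset,
    #                                      quant_trainer_args=quant_trainer_args)
    #   trainer.calibrate()            # collect activation ranges on train data
    #   metrics = trainer.evaluate()   # evaluate the calibrated model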
    def evaluate(self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"{metric_key_prefix}_" ):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict(self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , "predict" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"{metric_key_prefix}_" ):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
def _lowerCamelCase ( self , snake_case__="./" ):
'''simple docstring'''
UpperCamelCase_ = self.eval_dataset
UpperCamelCase_ = self.get_eval_dataloader(snake_case__ )
UpperCamelCase_ = next(iter(snake_case__ ) )
# saving device - to make it consistent
UpperCamelCase_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
UpperCamelCase_ = tuple(v.to(snake_case__ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
UpperCamelCase_ = True
UpperCamelCase_ = self.model.to(snake_case__ )
model.eval()
model.float()
UpperCamelCase_ = model.module if hasattr(snake_case__ , "module" ) else model
quant_trainer.configure_model(snake_case__ , self.quant_trainer_args )
UpperCamelCase_ = os.path.join(snake_case__ , "model.onnx" )
logger.info(F"""exporting model to {output_model_file}""" )
UpperCamelCase_ = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
snake_case__ , snake_case__ , snake_case__ , export_params=snake_case__ , opset_version=13 , do_constant_folding=snake_case__ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=snake_case__ , )
logger.info("onnx export finished" )
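    # The exported file can be loaded back with onnxruntime; a minimal sketch
    # (onnxruntime is not imported by this module, so this is illustration only):
    #
    #   import onnxruntime as ort
    #   sess = ort.InferenceSession("model.onnx")
    #   start_logits, end_logits = sess.run(
    #       None, {"input_ids": ids, "attention_mask": mask, "token_type_ids": types})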
| 504 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow''' ), features, num_examples=SPEED_TEST_N_EXAMPLES )

        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=True )

        def tokenize(examples ):
            return tokenizer(examples['''text'''] )

        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset, batched=True )
        times['''map no-op batched'''] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type='''torch''', columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset, function=lambda x: None, batched=True )
        with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset, function=lambda x: None, batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset, function=tokenize, batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
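# `get_duration` is imported from the local `utils` module, whose source is not
# shown here. A plausible minimal version (an assumption, not the real code):
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.perf_counter()
#           func(*args, **kwargs)
#           return time.perf_counter() - start
#       return wrapper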
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 98 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict=13 , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Tuple=99 , lowerCAmelCase__ : Optional[Any]=32 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Union[str, Any]=37 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : List[str]=20 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : Tuple=0 , ) -> int:
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def check_use_cache_forward(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask(self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
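# The helper above derives 0/1 masks directly from the token ids: positions equal
# to `pad_token_id` get 0, everything else 1, and the first decoder position is
# always kept. For example (hypothetical ids, pad_token_id = 0):
#   input_ids         = [[5, 7, 0]]  ->  attention_mask         = [[1, 1, 0]]
#   decoder_input_ids = [[2, 9, 0]]  ->  decoder_attention_mask = [[1, 1, 0]]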
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
        tokenizer = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text , return_tensors='''np''' , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 98 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
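# `get_distance` is the classic escape-time iteration z_{n+1} = z_n^2 + c with
# c = x + i*y, unrolled into real arithmetic. An equivalent sketch using Python's
# built-in complex type (illustrative only, not used below):
#
#   def get_distance_complex(x: float, y: float, max_step: int) -> float:
#       c = complex(x, y)
#       z = c
#       for step in range(max_step):  # noqa: B007
#           z = z * z + c
#           if abs(z) > 2:  # same condition as a * a + b * b > 4
#               break
#       return step / (max_step - 1)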
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 8_0_0, image_height: int = 6_0_0, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 5_0, use_distance_color_coding: bool = True, ) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 721 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 123 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    '''simple docstring'''

    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__(self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__(self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if text is None and audios is None:
            raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
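# Minimal usage sketch; the checkpoint name and inputs below are illustrative
# assumptions, not part of this module:
#
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=[audio_array],
#                      sampling_rate=48_000, return_tensors="pt")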
| 121 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 1_6 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == '''fp8''') , )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
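# With the gradient accumulation above, the effective batch size per optimizer
# step is unchanged: e.g. a configured batch_size of 64 with MAX_GPU_BATCH_SIZE=16
# gives gradient_accumulation_steps = 64 // 16 = 4, so each optimizer step still
# covers 16 * 4 = 64 examples per process.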
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
| 121 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCamelCase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
            'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer(self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor(self , **kwargs ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown(self):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
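
# A minimal usage sketch of the processor under test outside the test harness
# (the checkpoint name and prompt below are illustrative assumptions, not part
# of this suite):
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # batch now holds input_ids, attention_mask and pixel_values for a CLIP model.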
| 715 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
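        # For the defaults above this gives (30 // 2) ** 2 = 225 patches, so seq_length == 226.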
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # The `interpolate_pos_encoding` argument interpolates the pre-trained position
        # embeddings so the model can run on higher-resolution inputs.
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 273 | 0 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
        if alice_basis_bit == bob_basis_bit
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
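
# A minimal usage sketch (assumes qiskit with the Aer provider is installed):
#
#     key = bb84(key_len=16, seed=0)
#     assert len(key) == 16 and set(key) <= {"0", "1"}
#
# Only positions where Alice and Bob happened to pick the same measurement basis
# survive the sifting step, which is why the function oversamples by 6x.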
if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
| 262 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float],
                               min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Monte Carlo estimate of the area under a real-valued function on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
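
# Illustrative sanity check (the closeness here is an assumption about typical
# Monte Carlo noise at this sample count, not a guarantee):
#
#     estimate = area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
#     # The exact integral of x^2 over [0, 1] is 1/3, so `estimate` should land nearby.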
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('******************')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    # The area under y = sqrt(4 - x^2) for x in [0, 2] equals pi (a quarter circle of radius 2).
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 | 1 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
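
# Worked example (hand-checked arithmetic): simple_interest(1000, 0.01, 30)
# returns 1000 * 0.01 * 30 = 300.0, i.e. the interest accrued, not the new balance.
# compound_interest likewise returns only the interest earned on top of `principal`.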
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that each `complete_*` example script contains, line for line,
    the information found in the corresponding `by_feature` scripts.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples', 'by_feature'))
        examples_path = os.path.abspath('examples')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section='main()' if parser_only else 'training_function()',
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only)
                        diff = '\n'.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '')
                        self.assertEqual(diff, '')
    def test_nlp_examples(self):
        self.one_complete_example('complete_nlp_example.py', True)
        self.one_complete_example('complete_nlp_example.py', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('examples', 'cv_example.py'))
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py', True, cv_path, special_strings)
        self.one_complete_example('complete_cv_example.py', False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))

    def test_checkpointing_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))

    def test_load_states_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)

    def test_load_states_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
    @slow
    def test_cross_validation(self):
        testargs = '''
        examples/by_feature/cross_validation.py
        --num_folds 2
        '''.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))

    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs)
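
# A rough sketch of what `run_command(self._launch_args + testargs)` expands to on
# the command line (paths are the repository-relative ones used above):
#
#     accelerate launch --config_file <tmpdir>/default_config.yml \
#         examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <tmpdir>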
| 107 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator, dataset_size: int, batch_size: int,
    process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
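
# Padding arithmetic behind the expectations below: with even_batches=True, a
# 3-sample dataset split across 2 processes is padded by reusing a sample from
# the start, so each process sees two batches of size 1; with even_batches=False,
# process 1 is simply handed one batch fewer.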
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1,
        process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2,
        process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1,
        process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2,
        process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print('Test that even_batches variable ensures uniform batches across processes')
    test_default_ensures_even_batch_sizes()

    accelerator.print('Run tests with even_batches disabled')
    test_can_disable_even_batches()

    accelerator.print('Test joining uneven inputs')
    test_can_join_uneven_inputs()

    accelerator.print('Test overriding even_batches when joining uneven inputs')
    test_join_can_override_even_batches()

    accelerator.print('Test overriding even_batches for mixed dataloader types')
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders')
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print('Test join with non DDP distributed raises warning')
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 348 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
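
# Typical usage of this subpackage through the top-level diffusers namespace
# (the checkpoint name below is an illustrative assumption):
#
#     from diffusers import KandinskyPipeline
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")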
| 348 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg='`processor` and `feature_extractor` model input names do not match',
        )
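
# A minimal usage sketch of ClapProcessor outside the test harness (the sampling
# rate and the all-zeros array below are illustrative assumptions):
#
#     import numpy as np
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
#     inputs = processor(audios=audio, text=["a dog barking"], return_tensors="pt")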
| 88 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow console noise if it gets imported
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88 | 1 |
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    """
    Format a string of words into lines exactly max_width characters long,
    distributing extra spaces as evenly as possible (extras go to the leftmost gaps).
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word on the line,
            # just append overall_spaces_count spaces after it
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
from doctest import testmod
testmod()
| 4 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
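        # With the defaults above: image_size 30 -> cropped to 15, and
        # (15 // 2) ** 2 + 1 = 50 positions after interpolating the embeddings.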
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224')
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 232 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    # Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
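
# Minimal usage sketch (the checkpoint name below is an illustrative assumption):
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")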
| 674 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""},
                open(processor_tmpfile, """w"""),
            )
            json.dump({"""model_type""": """clip"""}, open(config_tmpfile, """w"""))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname) / """config.json"""
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""},
                open(processor_tmpfile, """w"""),
            )
            json.dump({"""model_type""": """clip"""}, open(config_tmpfile, """w"""))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = CLIPConfig()
            # Create a dummy config file with image_processor_type
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ )
# save in new folder
model_config.save_pretrained(snake_case__ )
config.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
# make sure private variable is not incorrectly saved
lowerCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaisesRegex(
snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
with self.assertRaisesRegex(
snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _SCREAMING_SNAKE_CASE ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoImageProcessor.register(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json"""
lowerCAmelCase__ = Path(snake_case__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) )
lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case__ )
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
        class a_ ( CLIPImageProcessor ):
            is_local = True
try:
AutoConfig.register("""custom""" , snake_case__ )
AutoImageProcessor.register(snake_case__ , snake_case__ )
# If remote code is not set, the default is to use local
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(snake_case__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 674 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class __magic_name__ ( TestCase ):
    def _create_dummy_dataset( self : Union[str, Any] ):
        _a : Any = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowercase ( self : int ):
import faiss
_a : Dataset = self._create_dummy_dataset()
        _a : Tuple = dset.map(
            lambda ex ,i : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=True ,keep_in_memory=True )
_a : Any = dset.add_faiss_index('vecs' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT )
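        # With inner-product similarity, the all-ones query scores highest against the largest stored vector (i * ones for i = 29)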
_a , _a : int = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
dset.drop_index('vecs' )
def __lowercase ( self : List[Any] ):
import faiss
_a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,batch_size=100 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
_a , _a : Optional[int] = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def __lowercase ( self : Dict ):
import faiss
_a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index('vecs' ,tmp_file.name )
dset.load_faiss_index('vecs2' ,tmp_file.name )
os.unlink(tmp_file.name )
_a , _a : List[str] = dset.get_nearest_examples('vecs2' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def __lowercase ( self : str ):
_a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_UpperCAmelCase ,partial(dset.get_nearest_examples ,'vecs2' ,np.ones(5 ,dtype=np.floataa ) ) )
def __lowercase ( self : Optional[Any] ):
from elasticsearch import Elasticsearch
_a : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
_a : Optional[Any] = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
_a : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
_a : Any = Elasticsearch()
dset.add_elasticsearch_index('filename' ,es_client=_UpperCAmelCase )
_a , _a : Dict = dset.get_nearest_examples('filename' ,'my_name-train_29' )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
@require_faiss
class __magic_name__ ( TestCase ):
def __lowercase ( self : str ):
import faiss
_a : Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
_a : Optional[Any] = np.zeros(5 ,dtype=np.floataa )
_a : Dict = 1
_a , _a : Optional[Any] = index.search(_UpperCAmelCase )
self.assertRaises(_UpperCAmelCase ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
_a : Any = np.eye(5 ,dtype=np.floataa )[::-1]
_a , _a : List[Any] = index.search_batch(_UpperCAmelCase )
self.assertRaises(_UpperCAmelCase ,index.search_batch ,queries[0] )
_a : Union[str, Any] = [scores[0] for scores in total_scores]
_a : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCAmelCase ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,_UpperCAmelCase )
def __lowercase ( self : Tuple ):
import faiss
_a : Tuple = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
_a : Union[str, Any] = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(_UpperCAmelCase ):
_a : Union[str, Any] = FaissIndex(string_factory='Flat' ,custom_index=faiss.IndexFlat(5 ) )
def __lowercase ( self : List[str] ):
import faiss
_a : Dict = faiss.IndexFlat(5 )
_a : Optional[int] = FaissIndex(custom_index=_UpperCAmelCase )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def __lowercase ( self : List[str] ):
import faiss
_a : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
index.save(tmp_file.name )
_a : str = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_a : Optional[int] = np.zeros(5 ,dtype=np.floataa )
_a : List[Any] = 1
_a , _a : str = index.search(_UpperCAmelCase )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def test_serialization_fs(mockfs ) -> None:
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[0] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __magic_name__ ( TestCase ):
def __lowercase ( self : Tuple ):
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
_a : Optional[int] = Elasticsearch()
_a : Union[str, Any] = {'acknowledged': True}
_a : Optional[int] = ElasticSearchIndex(es_client=_UpperCAmelCase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
_a : Tuple = 'foo'
_a : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
_a , _a : List[str] = index.search(_UpperCAmelCase )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
_a : str = 'foo'
_a : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
_a , _a : Any = index.search(_UpperCAmelCase ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
_a : Union[str, Any] = ['foo', 'bar', 'foobar']
_a : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
_a , _a : Tuple = index.search_batch(_UpperCAmelCase )
_a : Union[str, Any] = [scores[0] for scores in total_scores]
_a : str = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCAmelCase ) ,0 )
self.assertListEqual([1, 1, 1] ,_UpperCAmelCase )
# batched queries with timeout
_a : Dict = ['foo', 'bar', 'foobar']
_a : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
_a , _a : str = index.search_batch(_UpperCAmelCase ,request_timeout=30 )
_a : Dict = [scores[0] for scores in total_scores]
_a : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_UpperCAmelCase ) ,0 )
self.assertListEqual([1, 1, 1] ,_UpperCAmelCase )
| 358 |
'''simple docstring'''
def is_isogram(string ) -> bool:
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
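# An isogram has no repeated letters, so deduplicating must preserve the count; the sort above is cosmetic for this check.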
if __name__ == "__main__":
    input_str = input('''Enter a string ''').strip()
    isogram = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 358 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt , train_usr , train_mtch , test_dt , test_mtch ) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
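    # beta above is the closed-form ordinary least squares solution: beta = (X^T X)^(-1) X^T y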
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def sarimax_predictor(train_user , train_match , test_match ) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=6_00 , method='''nm''' )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
return result[0]
def support_vector_regressor(x_train , x_test , train_user ) -> float:
    """simple docstring"""
    regressor = SVR(kernel='''rbf''' , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
return y_pred[0]
def interquartile_range_checker(train_user ) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
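    # The lower safety bound sits one-tenth of the IQR below the first quartile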
return low_lim
def data_safety_checker(list_vote , actual_result ) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
return safe > not_safe
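# Vote rule above, worked on a small sketch: with actual_result = 5.0 and votes
# [4.95, 5.2, 4.7], the vote 4.95 is safe (within 0.1 and not above), 5.2 exceeds
# the actual value and 4.7 is too far off, so safe = 1 < not_safe = 2 and the
# function returns False (the day is flagged unsafe).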
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["""total_user""", """total_even""", """days"""]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCAmelCase_ : str = "" if data_safety_checker(res_vote, tst_user) else "not "
print("""Today's data is {not_str}safe.""")
| 712 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict , lowercase_ : str=3 , lowercase_ : Dict=7 , lowercase_ : Any=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=99 , lowercase_ : Dict=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Optional[Any]=4 , lowercase_ : List[str]=37 , lowercase_ : int="gelu" , lowercase_ : int=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Any=512 , lowercase_ : List[Any]=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=3 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : Dict = use_input_mask
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : str = use_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : int = num_labels
SCREAMING_SNAKE_CASE_ : Dict = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
    def prepare_config_and_inputs( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Union[str, Any]):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = FalconModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : int , lowercase_ : str , lowercase_ : str , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : Optional[int] = FalconModel(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = FalconForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Any , lowercase_ : int , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = FalconForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE_ : List[str] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
SCREAMING_SNAKE_CASE_ : Any = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
# select random slice
SCREAMING_SNAKE_CASE_ : int = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
    def prepare_config_and_inputs_for_common( self : Union[str, Any]):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (FalconForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = FalconModelTester(self)
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
SCREAMING_SNAKE_CASE_ : Optional[int] = alibi
self.model_tester.create_and_check_model(lowercase_ , *lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Tuple = 3
SCREAMING_SNAKE_CASE_ : Tuple = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE_ : Optional[int] = input_ids.ne(1).to(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Tuple = FalconForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Optional[int] = 3
SCREAMING_SNAKE_CASE_ : int = '''single_label_classification'''
SCREAMING_SNAKE_CASE_ : Optional[int] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.ne(1).to(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FalconForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[Any] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = FalconForCausalLM(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(lowercase_ , use_cache=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = input_ids.shape[0]
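        # Falcon's RW cache stores 3-D tensors with batch and head dims fused; the standard format keeps them separate as 4-D, which the assertions below verify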
SCREAMING_SNAKE_CASE_ : Dict = model._convert_to_rw_cache(result.past_key_values)
SCREAMING_SNAKE_CASE_ : int = model._convert_cache_to_standard_format(lowercase_ , lowercase_)
for layer in range(len(lowercase_)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : int = 3
SCREAMING_SNAKE_CASE_ : Tuple = '''multi_label_classification'''
SCREAMING_SNAKE_CASE_ : Tuple = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE_ : List[str] = input_ids.ne(1).to(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE_ : int = FalconForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowercase_ , '''use_cache'''):
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_).to(lowercase_)
if "use_cache" not in inputs:
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : str = model(**lowercase_)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
SCREAMING_SNAKE_CASE_ : Tuple = (
getattr(lowercase_ , '''decoder_layers''' , lowercase_)
or getattr(lowercase_ , '''num_decoder_layers''' , lowercase_)
or config.num_hidden_layers
)
SCREAMING_SNAKE_CASE_ : str = getattr(lowercase_ , '''num_kv_heads''' , config.num_attention_heads)
SCREAMING_SNAKE_CASE_ : Tuple = getattr(lowercase_ , '''d_model''' , config.hidden_size)
SCREAMING_SNAKE_CASE_ : Dict = embed_dim // num_attention_heads
SCREAMING_SNAKE_CASE_ : int = outputs['''past_key_values''']
self.assertEqual(len(lowercase_) , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = inputs['''input_ids'''].shape
for i in range(lowercase_):
if config.new_decoder_architecture:
SCREAMING_SNAKE_CASE_ : Dict = config.num_attention_heads
elif config.multi_query:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''')
SCREAMING_SNAKE_CASE_ : Optional[int] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''')
model.eval()
model.to(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
SCREAMING_SNAKE_CASE_ : Optional[int] = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=19)
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.batch_decode(lowercase_)[0]
self.assertEqual(lowercase_ , lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : str = FalconForCausalLM.from_pretrained(lowercase_)
model.eval()
model.to(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(lowercase_)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=4)
model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=4)
model.generate(**lowercase_ , num_beams=2 , max_new_tokens=4)
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = FalconForCausalLM.from_pretrained(lowercase_)
model.eval()
model.to(device=lowercase_)
SCREAMING_SNAKE_CASE_ : int = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(lowercase_)
# Test results are the same with and without cache
SCREAMING_SNAKE_CASE_ : Any = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 , use_cache=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 , use_cache=lowercase_)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 176 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
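    # Note (an assumption based on the standard Transformers lazy-import pattern):
    # installing the _LazyModule in sys.modules defers the torch-backed model
    # imports until an attribute such as XLMRobertaXLModel is first accessed.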
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 45 |
import unittest
import numpy as np
def schur_complement(mat_a , mat_b , mat_c , pseudo_inv = None , ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            F'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            F'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.' )
    return mat_c - mat_b.T @ a_inv @ mat_b
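# A minimal hand-checkable sketch (not part of the test suite) of the identity
# det(M) = det(A) * det(S) that the tests below verify on larger blocks.
def _schur_determinant_demo() -> None:
    a = np.array([[4.0]])
    b = np.array([[2.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c)  # S = C - B^T A^(-1) B = 3 - 2 * (1/4) * 2 = 2.0
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))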
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        block = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(block )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
def _a ( self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
def _a ( self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
    unittest.main()
| 57 | 0 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url , params ) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
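# Note on the scrape above: on a Google Scholar result card, the third anchor in
# the "gs_fl" footer row is the "Cited by N" link, whose text is returned.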
if __name__ == "__main__":
__UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 715 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowercase : List[str] = KandinskyVaaControlnetImgaImgPipeline
_lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
_lowercase : str = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
_lowercase : Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowercase : Tuple = False
@property
def __magic_name__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
return 3_2
@property
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Any ):
'''simple docstring'''
return 1_0_0
@property
def __magic_name__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"in_channels": 8,
            # Out channels is double the in channels because the model predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(**A_ )
return model
@property
def __magic_name__ ( self : Dict ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Tuple = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_lowerCAmelCase : int = DDIMScheduler(**A_ )
_lowerCAmelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __magic_name__ ( self : Union[str, Any] , A_ : Union[str, Any] , A_ : Optional[int]=0 ):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
_lowerCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
_lowerCAmelCase : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(A_ ) ).to(A_ )
_lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Tuple = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create hint
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith("mps" ):
_lowerCAmelCase : Tuple = torch.manual_seed(A_ )
else:
_lowerCAmelCase : Any = torch.Generator(device=A_ ).manual_seed(A_ )
_lowerCAmelCase : Union[str, Any] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**A_ )
_lowerCAmelCase : Optional[Any] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(A_ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : str = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
_lowerCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_lowerCAmelCase : Union[str, Any] = init_image.resize((5_1_2, 5_1_2) )
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
_lowerCAmelCase : Tuple = torch.from_numpy(np.array(A_ ) ).float() / 255.0
_lowerCAmelCase : Optional[int] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_lowerCAmelCase : List[str] = "A robot, 4k photo"
_lowerCAmelCase : Dict = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
_lowerCAmelCase : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
_lowerCAmelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = pipe_prior(
A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt="" , ).to_tuple()
_lowerCAmelCase : List[Any] = pipeline(
image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )
_lowerCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(A_ , A_ )
| 503 | 0 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
def __init__( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = ''
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = 256
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
def UpperCAmelCase_ ( self , A_ )-> str:
'''simple docstring'''
UpperCamelCase = cva.imread(A_ , 0 )
UpperCamelCase = copy.deepcopy(self.img )
UpperCamelCase , UpperCamelCase , UpperCamelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCamelCase = np.sum(A_ )
for i in range(len(A_ ) ):
UpperCamelCase = x[i] / self.k
self.sk += prk
UpperCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
                UpperCamelCase = last % 1  # fractional part of last, used to pick the rounding direction below
UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase = self.img[1].size
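        # Remap every pixel through the lookup table built from the cumulative histogram above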
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 3 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
    'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig):
    model_type = """mctct"""
def __init__( self , A_=8065 , A_=1536 , A_=36 , A_=6144 , A_=4 , A_=384 , A_=920 , A_=1e-5 , A_=0.3 , A_="relu" , A_=0.02 , A_=0.3 , A_=0.3 , A_=1 , A_=0 , A_=2 , A_=1 , A_=0.3 , A_=1 , A_=(7,) , A_=(3,) , A_=80 , A_=1 , A_=None , A_="sum" , A_=False , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = num_attention_heads
UpperCamelCase = attention_head_dim
UpperCamelCase = max_position_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = layerdrop
UpperCamelCase = hidden_act
UpperCamelCase = initializer_range
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = conv_glu_dim
UpperCamelCase = conv_dropout
UpperCamelCase = num_conv_layers
UpperCamelCase = input_feat_per_channel
UpperCamelCase = input_channels
UpperCamelCase = conv_channels
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
        # prevents config testing from failing when exporting to JSON
UpperCamelCase = list(A_ )
UpperCamelCase = list(A_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 3 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _A ( _lowerCamelCase ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : List[int] = []
_UpperCamelCase : List[int] = []
def __init__( self : List[str] , _A : Tuple , _A : str="<s>" , _A : Any="</s>" , _A : str="</s>" , _A : Any="<s>" , _A : Any="<unk>" , _A : Tuple="<pad>" , _A : List[str]="<mask>" , _A : Optional[Any]=None , _A : int=None , _A : List[str]=None , _A : Optional[Dict[str, Any]] = None , _A : Optional[int]=None , _A : Any=False , **_A : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
lowercase : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
lowercase : str = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase : List[Any] = legacy_behaviour
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_A , **_A , )
lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
lowercase : Any = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase : Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] = 1
lowercase : Union[str, Any] = len(self.sp_model )
lowercase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
lowercase : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
lowercase : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase : Optional[int] = src_lang if src_lang is not None else '''eng_Latn'''
lowercase : Optional[Any] = self.lang_code_to_id[self._src_lang]
lowercase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Tuple = self.__dict__.copy()
lowercase : List[str] = None
lowercase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , _A : List[Any] ) -> str:
"""simple docstring"""
lowercase : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : int = {}
lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __a ( self : Any , _A : str ) -> None:
"""simple docstring"""
lowercase : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __a ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
lowercase : List[Any] = [1] * len(self.prefix_tokens )
lowercase : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def __a ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __a ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : str = [self.sep_token_id]
lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : Union[str, Any] , _A : Any , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : Optional[Any] ) -> int:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase : Tuple = src_lang
lowercase : Dict = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
lowercase : Any = self.convert_tokens_to_ids(_A )
lowercase : List[Any] = tgt_lang_id
return inputs
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : List[Any] , _A : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_A , out_type=_A )
def __a ( self : List[Any] , _A : str ) -> List[str]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self : Any , _A : Tuple ) -> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self : str , _A : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : str = ''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def __a ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def __a ( self : Optional[int] , _A : List[str] , _A : str = "eng_Latn" , _A : Optional[List[str]] = None , _A : str = "fra_Latn" , **_A : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
lowercase : Any = src_lang
lowercase : List[str] = tgt_lang
        return super().prepare_seq2seq_batch(_A , _A , **_A )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __a ( self : Union[str, Any] , _A : Optional[Any] ) -> None:
"""simple docstring"""
lowercase : str = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase : Union[str, Any] = []
lowercase : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
lowercase : List[Any] = [self.cur_lang_code]
lowercase : Any = [self.eos_token_id]
def __a ( self : Union[str, Any] , _A : str ) -> None:
"""simple docstring"""
lowercase : Tuple = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase : str = []
lowercase : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowercase : Optional[Any] = [self.cur_lang_code]
            lowercase : List[Any] = [self.eos_token_id]
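# Readable sketch (assumed names) of the fairseq-offset id mapping implemented by
# the tokenizer above: the first four ids are pinned to fairseq's special tokens,
# every other SentencePiece id is shifted by a fixed offset, and spm id 0
# (unknown) falls back to fairseq's <unk>.
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1
def _token_to_id(token, sp_piece_to_id):
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = sp_piece_to_id(token)
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]
# With a stub spm model where "an" -> 3, the aligned id becomes 4:
assert _token_to_id("an", lambda t: {"an": 3}.get(t, 0)) == 4
assert _token_to_id("<pad>", lambda t: 0) == 1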
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _A ( unittest.TestCase ):
def __a ( self : str ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
        lowercase : Dict = UNet2DModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        lowercase : Dict = UNet2DConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : List[str] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        lowercase : Optional[int] = UNet2DModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase : List[Any] = DDPMScheduler()
lowercase : Optional[int] = AudioDiffusionPipeline(vqvae=_A , unet=self.dummy_unet , mel=_A , scheduler=_A )
lowercase : Any = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowercase : Any = torch.Generator(device=_A ).manual_seed(42 )
lowercase : List[str] = pipe(generator=_A , steps=4 )
lowercase : List[str] = output.audios[0]
lowercase : List[str] = output.images[0]
lowercase : Any = torch.Generator(device=_A ).manual_seed(42 )
lowercase : str = pipe(generator=_A , steps=4 , return_dict=_A )
lowercase : Tuple = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowercase : Any = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
lowercase : str = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase : Dict = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase : List[Any] = DDIMScheduler()
lowercase : List[str] = self.dummy_vqvae_and_unet
lowercase : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_A , scheduler=_A )
lowercase : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
np.random.seed(0 )
lowercase : int = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase : List[str] = torch.Generator(device=_A ).manual_seed(42 )
lowercase : Tuple = pipe(raw_audio=_A , generator=_A , start_step=5 , steps=10 )
lowercase : Any = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase : Optional[int] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowercase : Dict = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase : Dict = self.dummy_unet_condition
lowercase : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_A , mel=_A , scheduler=_A )
lowercase : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
np.random.seed(0 )
lowercase : Dict = torch.rand((1, 1, 10) )
lowercase : Optional[int] = pipe(generator=_A , encoding=_A )
lowercase : int = output.images[0]
lowercase : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowercase : int = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def __a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ) -> int:
"""simple docstring"""
lowercase : Optional[int] = torch_device
lowercase : Optional[int] = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
lowercase : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowercase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(42 )
lowercase : Dict = pipe(generator=_A )
lowercase : Union[str, Any] = output.audios[0]
lowercase : int = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowercase : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
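# Sketch of the shape relation asserted in the tests above (assumed names): a mel
# spectrogram that is W frames wide with hop length H decodes to (W - 1) * H
# audio samples, which is what `output.audios[0].shape` is checked against.
def _expected_audio_length(spec_width: int, hop_length: int) -> int:
    return (spec_width - 1) * hop_length
assert _expected_audio_length(64, 512) == 32256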
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
        __snake_case : str = nn.BatchNorm1d(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
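# Minimal sketch (simplified, assumed to mirror accelerate's add_hook_to_module)
# of how the hooks above take effect: the module's forward is swapped for a
# wrapper that runs the hook's pre_forward before, and post_forward after, the
# original call.
def _sketch_add_hook(module, hook):
    old_forward = module.forward
    def wrapped(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        output = old_forward(*args, **kwargs)
        return hook.post_forward(module, output)
    module._old_forward = old_forward  # kept so the hook can be removed later
    module.forward = wrapped
class _Identity:
    def forward(self, x):
        return x
class _PlusOneHook:
    def pre_forward(self, module, *args, **kwargs):
        return args, kwargs
    def post_forward(self, module, output):
        return output + 1
_m = _Identity()
_sketch_add_hook(_m, _PlusOneHook())
assert _m.forward(41) == 42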
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(R"""^\s*else:""")
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> Tuple:
'''simple docstring'''
if _re_test_backend.search(UpperCamelCase__ ) is None:
return None
_snake_case = [b[0] for b in _re_backend.findall(UpperCamelCase__ )]
backends.sort()
return "_and_".join(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.readlines()
_snake_case = 0
while line_index < len(UpperCamelCase__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase__ ):
return None
# First grab the objects without a specific backend in _import_structure
_snake_case = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
_snake_case = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase__ ):
_snake_case = _re_one_line_import_struct.search(UpperCamelCase__ ).groups()[0]
_snake_case = re.findall(R'\[([^\]]+)\]' , UpperCamelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
_snake_case = _re_import_struct_key_value.search(UpperCamelCase__ )
if single_line_import_search is not None:
_snake_case = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(UpperCamelCase__ ) > 0]
objects.extend(UpperCamelCase__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
_snake_case = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_snake_case = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
_snake_case = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase__ ) is not None:
_snake_case = _re_import_struct_add_many.search(UpperCamelCase__ ).groups()[0].split(', ' )
_snake_case = [obj[1:-1] for obj in imports if len(UpperCamelCase__ ) > 0]
objects.extend(UpperCamelCase__ )
elif _re_between_brackets.search(UpperCamelCase__ ) is not None:
_snake_case = _re_between_brackets.search(UpperCamelCase__ ).groups()[0].split(', ' )
_snake_case = [obj[1:-1] for obj in imports if len(UpperCamelCase__ ) > 0]
objects.extend(UpperCamelCase__ )
elif _re_quote_object.search(UpperCamelCase__ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
_snake_case = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_snake_case = []
while (
line_index < len(UpperCamelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
_snake_case = lines[line_index]
_snake_case = _re_import.search(UpperCamelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_snake_case = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
_snake_case = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
_snake_case = lines[line_index]
_snake_case = _re_import.search(UpperCamelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_snake_case = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> Tuple:
'''simple docstring'''
def find_duplicates(UpperCamelCase__ : Dict ):
return [k for k, v in collections.Counter(UpperCamelCase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_snake_case = []
for key in import_dict_objects.keys():
_snake_case = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_snake_case = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_snake_case = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
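# Quick illustration of the duplicate detection used inside the function above:
# collections.Counter flags any object that is listed twice on one side of the init.
_demo_dups = [k for k, v in collections.Counter(["A", "B", "A"]).items() if v > 1]
assert _demo_dups == ["A"]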
def lowerCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
_snake_case = []
for root, _, files in os.walk(UpperCamelCase__ ):
if "__init__.py" in files:
_snake_case = os.path.join(UpperCamelCase__ , '__init__.py' )
_snake_case = parse_init(UpperCamelCase__ )
if objects is not None:
_snake_case = analyze_results(*UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_snake_case = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) > 0:
raise ValueError('\n\n'.join(UpperCamelCase__ ) )
def lowerCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
_snake_case = []
for path, directories, files in os.walk(UpperCamelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(UpperCamelCase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase__ ) / folder).glob('*.py' ) ) ) == 0:
continue
_snake_case = str((Path(UpperCamelCase__ ) / folder).relative_to(UpperCamelCase__ ) )
_snake_case = short_path.replace(os.path.sep , '.' )
submodules.append(UpperCamelCase__ )
for fname in files:
if fname == "__init__.py":
continue
_snake_case = str((Path(UpperCamelCase__ ) / fname).relative_to(UpperCamelCase__ ) )
_snake_case = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(UpperCamelCase__ )
return submodules
UpperCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def lowerCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
from transformers.utils import direct_transformers_import
_snake_case = direct_transformers_import(UpperCamelCase__ )
_snake_case = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to find all additions and
    # (potentially re-)add them.
with open(os.path.join(UpperCamelCase__ , '__init__.py' ) , 'r' ) as f:
_snake_case = f.read()
import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , UpperCamelCase__ ) ) )
_snake_case = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(UpperCamelCase__ ) > 0:
_snake_case = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
def lowerCamelCase__ ( UpperCamelCase__ : list ) -> list:
'''simple docstring'''
_snake_case = len(UpperCamelCase__ )
for i in range(1 , UpperCamelCase__ ):
_snake_case = collection[i]
_snake_case = 0
_snake_case = i - 1
while low <= high:
_snake_case = (low + high) // 2
if val < collection[mid]:
_snake_case = mid - 1
else:
_snake_case = mid + 1
for j in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
_snake_case = collection[j - 1]
_snake_case = val
return collection
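# The loop assignments above were collapsed by obfuscation; for reference, a
# readable version of the same algorithm: binary-search the insertion point,
# shift the tail one slot to the right, then drop the value in.
def _binary_insertion_sort_ref(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
assert _binary_insertion_sort_ref([5, 2, 4, 1]) == [1, 2, 4, 5]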
if __name__ == "__main__":
UpperCAmelCase_ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
lowerCamelCase = 0
lowerCamelCase = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
lowerCamelCase = tuple[int, int]
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
"""simple docstring"""
a__ : Tuple = pos_x
a__ : str = pos_y
a__ : Any = (pos_y, pos_x)
a__ : List[str] = goal_x
a__ : Optional[Any] = goal_y
a__ : int = g_cost
a__ : Any = parent
a__ : List[Any] = self.calculate_heuristic()
a__ : str = self.g_cost + self.h_cost
def _A ( self ):
"""simple docstring"""
a__ : int = self.pos_x - self.goal_x
a__ : str = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__UpperCAmelCase ) + abs(__UpperCAmelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , __UpperCAmelCase ):
"""simple docstring"""
return self.f_cost < other.f_cost
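# Numeric sanity check of the two heuristics selected by HEURISTIC above
# (illustrative helper, not part of the original module): for dx=3, dy=4 the
# Manhattan distance is 7 and the Euclidean distance is 5.
def _demo_heuristics(dx, dy):
    return abs(dx) + abs(dy), sqrt(dx**2 + dy**2)
assert _demo_heuristics(3, 4) == (7, 5.0)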
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __UpperCAmelCase )
a__ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , __UpperCAmelCase )
a__ : Any = [self.start]
a__ : list[Node] = []
a__ : int = False
def _A ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a__ : Dict = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__UpperCAmelCase )
self.closed_nodes.append(__UpperCAmelCase )
a__ : int = self.get_successors(__UpperCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__UpperCAmelCase )
else:
# retrieve the best current path
a__ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(__UpperCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__UpperCAmelCase )
else:
self.open_nodes.append(__UpperCAmelCase )
return [self.start.pos]
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Tuple = []
for action in delta:
a__ : Optional[Any] = parent.pos_x + action[1]
a__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__UpperCAmelCase , __UpperCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __UpperCAmelCase , ) )
return successors
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Optional[int] = node
a__ : List[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a__ : Dict = current_node.parent
path.reverse()
return path
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Any = AStar(__UpperCAmelCase , __UpperCAmelCase )
a__ : Optional[int] = AStar(__UpperCAmelCase , __UpperCAmelCase )
a__ : str = False
def _A ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
a__ : Dict = self.fwd_astar.open_nodes.pop(0 )
a__ : Any = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__UpperCAmelCase , __UpperCAmelCase )
self.fwd_astar.closed_nodes.append(__UpperCAmelCase )
self.bwd_astar.closed_nodes.append(__UpperCAmelCase )
a__ : str = current_bwd_node
a__ : int = current_fwd_node
a__ : str = {
self.fwd_astar: self.fwd_astar.get_successors(__UpperCAmelCase ),
self.bwd_astar: self.bwd_astar.get_successors(__UpperCAmelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__UpperCAmelCase )
else:
# retrieve the best current path
a__ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(__UpperCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__UpperCAmelCase )
else:
astar.open_nodes.append(__UpperCAmelCase )
return [self.fwd_astar.start.pos]
def _A ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
a__ : Union[str, Any] = self.fwd_astar.retrace_path(__UpperCAmelCase )
a__ : Optional[Any] = self.bwd_astar.retrace_path(__UpperCAmelCase )
bwd_path.pop()
bwd_path.reverse()
a__ : Union[str, Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowerCamelCase = (0, 0)
lowerCamelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase = time.time()
lowerCamelCase = AStar(init, goal)
lowerCamelCase = a_star.search()
lowerCamelCase = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
lowerCamelCase = time.time()
lowerCamelCase = BidirectionalAStar(init, goal)
lowerCamelCase = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
# load base model
    SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase , torch_dtype=torch.float32 )
# load LoRA weight from .safetensors
SCREAMING_SNAKE_CASE : Any = load_file(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look something like the line below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
SCREAMING_SNAKE_CASE : Dict = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
SCREAMING_SNAKE_CASE : Tuple = pipeline.text_encoder
else:
SCREAMING_SNAKE_CASE : Any = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
SCREAMING_SNAKE_CASE : Dict = pipeline.unet
# find the target layer
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_infos.pop(0 )
while len(__lowerCAmelCase ) > -1:
try:
SCREAMING_SNAKE_CASE : List[Any] = curr_layer.__getattr__(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
SCREAMING_SNAKE_CASE : Optional[int] = layer_infos.pop(0 )
elif len(__lowerCAmelCase ) == 0:
break
except Exception:
if len(__lowerCAmelCase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
SCREAMING_SNAKE_CASE : List[str] = layer_infos.pop(0 )
SCREAMING_SNAKE_CASE : str = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(__lowerCAmelCase )
else:
pair_keys.append(__lowerCAmelCase )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
            SCREAMING_SNAKE_CASE : Any = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            SCREAMING_SNAKE_CASE : str = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
curr_layer.weight.data += alpha * torch.mm(__lowerCAmelCase , __lowerCAmelCase ).unsqueeze(2 ).unsqueeze(3 )
else:
            SCREAMING_SNAKE_CASE : List[Any] = state_dict[pair_keys[0]].to(torch.float32 )
            SCREAMING_SNAKE_CASE : Optional[int] = state_dict[pair_keys[1]].to(torch.float32 )
curr_layer.weight.data += alpha * torch.mm(__lowerCAmelCase , __lowerCAmelCase )
# update visited list
for item in pair_keys:
visited.append(__lowerCAmelCase )
return pipeline
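# The update inside the loop above implements W <- W0 + alpha * (up @ down).
# Minimal standalone sketch (assumed shapes: up is [out, rank], down is
# [rank, in]; the 4-D conv branch just squeezes/unsqueezes around the same matmul):
def _merge_lora(weight, up, down, alpha=0.75):
    return weight + alpha * torch.mm(up, down)
_merged = _merge_lora(torch.zeros(4, 3), torch.ones(4, 2), torch.ones(2, 3))
assert _merged.shape == (4, 3) and _merged[0, 0].item() == 1.5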
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_lowerCamelCase : List[Any] = parser.parse_args()
_lowerCamelCase : List[str] = args.base_model_path
_lowerCamelCase : str = args.checkpoint_path
_lowerCamelCase : Dict = args.dump_path
_lowerCamelCase : Any = args.lora_prefix_unet
_lowerCamelCase : List[Any] = args.lora_prefix_text_encoder
_lowerCamelCase : List[Any] = args.alpha
_lowerCamelCase : Optional[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_lowerCamelCase : Optional[Any] = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
def a ( A__ : List[str] , A__ : Optional[Any] ) -> Any:
"""simple docstring"""
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(A__ ):
for j in range(A__ ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def a ( A__ : List[str] , A__ : Any ) -> Optional[int]:
"""simple docstring"""
_lowercase =[[float('inf' ) for _ in range(A__ )] for _ in range(A__ )]
for i in range(A__ ):
for j in range(A__ ):
_lowercase =graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(A__ ):
# looping through rows of graph array
for i in range(A__ ):
# looping through columns of graph array
for j in range(A__ ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_lowercase =dist[i][k] + dist[k][j]
_print_dist(A__ , A__ )
return dist, v
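# Tiny worked example of the relaxation above, dist[i][j] = min(dist[i][j],
# dist[i][k] + dist[k][j]) (illustrative and independent of the interactive
# __main__ below): edges 0->1 (5) and 1->2 (2) give a shortest 0->2 path of 7.
def _demo_floyd_warshall():
    inf = float('inf')
    dist = [[0, 5, inf], [inf, 0, 2], [inf, inf, 0]]
    for k in range(3):
        for i in range(3):
            for j in range(3):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    assert dist[0][2] == 7
_demo_floyd_warshall()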
if __name__ == "__main__":
lowercase_ = int(input('Enter number of vertices: '))
lowercase_ = int(input('Enter number of edges: '))
lowercase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowercase_ = 0.0
    # src and dst are vertex indices and must be within range(v);
    # out-of-range input will result in an IndexError
for i in range(e):
print('\nEdge ', i + 1)
lowercase_ = int(input('Enter source:'))
lowercase_ = int(input('Enter destination:'))
lowercase_ = float(input('Enter weight:'))
lowercase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
from math import pow, sqrt
def a ( *A__ : float ) -> bool:
"""simple docstring"""
_lowercase =len(A__ ) > 0 and all(value > 0.0 for value in values )
return result
def a ( A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
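# Hedged usage sketch (editor addition): helium (4.0 g/mol) effuses faster
# than oxygen (32.0 g/mol); Graham's law gives rate_He / rate_O2
# = sqrt(32.0 / 4.0) ~= 2.828427.
if __name__ == "__main__":
    print(effusion_ratio(4.0, 32.0))  # 2.828427
    # Invalid inputs return a ValueError instance instead of raising it.
    print(effusion_ratio(-4.0, 32.0))  # Input Error: Molar mass values must be greater than 0.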
| 380 | 0 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class a_ ( datasets.BuilderConfig ):
lowercase_ : Optional[datasets.Features] = None
lowercase_ : str = "utf-8"
lowercase_ : Optional[str] = None
lowercase_ : Optional[str] = None
lowercase_ : bool = True # deprecated
lowercase_ : Optional[int] = None # deprecated
lowercase_ : int = 10 << 20 # 10MB
lowercase_ : Optional[bool] = None
class a_ ( datasets.ArrowBasedBuilder ):
lowercase_ : int = JsonConfig
def lowercase__ ( self : int ):
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def lowercase__ ( self : int , __lowerCAmelCase : Dict ):
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
__snake_case = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCAmelCase , (str, list, tuple) ):
__snake_case = data_files
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__snake_case = [files]
__snake_case = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__snake_case = []
for split_name, files in data_files.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__snake_case = [files]
__snake_case = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCAmelCase , gen_kwargs={'files': files} ) )
return splits
def lowercase__ ( self : List[str] , __lowerCAmelCase : pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__snake_case = self.config.features.arrow_schema.field(__lowerCAmelCase ).type
__snake_case = pa_table.append_column(__lowerCAmelCase , pa.array([None] * len(__lowerCAmelCase ) , type=__lowerCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case = table_cast(__lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def lowercase__ ( self : Tuple , __lowerCAmelCase : Any ):
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__snake_case = json.load(__lowerCAmelCase )
# We keep only the field we are interested in
__snake_case = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__lowerCAmelCase , (list, tuple) ):
__snake_case = set().union(*[row.keys() for row in dataset] )
__snake_case = {col: [row.get(__lowerCAmelCase ) for row in dataset] for col in keys}
else:
__snake_case = dataset
__snake_case = pa.Table.from_pydict(__lowerCAmelCase )
yield file_idx, self._cast_table(__lowerCAmelCase )
# If the file has one json object per line
else:
with open(__lowerCAmelCase , 'rb' ) as f:
__snake_case = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__snake_case = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
__snake_case = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
__snake_case = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__snake_case = batch.decode(self.config.encoding , errors=__lowerCAmelCase ).encode('utf-8' )
try:
while True:
try:
__snake_case = paj.read_json(
io.BytesIO(__lowerCAmelCase ) , read_options=paj.ReadOptions(block_size=__lowerCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__lowerCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(__lowerCAmelCase )
or block_size > len(__lowerCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(__lowerCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__snake_case = json.load(__lowerCAmelCase )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCAmelCase , __lowerCAmelCase ): # list is the only sequence type supported in JSON
try:
__snake_case = set().union(*[row.keys() for row in dataset] )
__snake_case = {col: [row.get(__lowerCAmelCase ) for row in dataset] for col in keys}
__snake_case = pa.Table.from_pydict(__lowerCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(__lowerCAmelCase )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(__lowerCAmelCase )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCAmelCase )
batch_idx += 1
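# Hedged usage sketch (editor addition): the public entry point for this builder
# is `datasets.load_dataset`; `field` selects a key when the whole file is a
# single JSON object. The file names below are illustrative only.
#
#   from datasets import load_dataset
#
#   # one JSON object per line (JSON Lines)
#   ds = load_dataset("json", data_files="data.jsonl")
#
#   # a single object shaped like {"data": [{...}, {...}]}
#   ds = load_dataset("json", data_files="data.json", field="data")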
| 356 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a
    time; the count follows the Fibonacci recurrence."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be a positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
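# Hedged worked example (editor addition): the counts follow the Fibonacci
# sequence, because the final move is either a single step or a double step.
assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]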
| 356 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : str=1_3 , __lowerCamelCase : Any=3_0 , __lowerCamelCase : Any=2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Optional[Any]=3_7 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Dict=1_0 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Tuple=None , __lowerCamelCase : Dict=2 , ) -> Any:
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = scope
__magic_name__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = (image_size // patch_size) ** 2
__magic_name__ = num_patches + 1
def _snake_case ( self : Dict ) -> Tuple:
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : int ) -> Any:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) -> List[str]:
__magic_name__ = ViTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__magic_name__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Any:
__magic_name__ = ViTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__magic_name__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = ViTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> Tuple:
__magic_name__ = self.type_sequence_label_size
__magic_name__ = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__magic_name__ = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self : List[str] ) -> str:
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCAmelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _snake_case ( self : Optional[int] ) -> str:
__magic_name__ = ViTModelTester(self )
__magic_name__ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def _snake_case ( self : Any ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _snake_case ( self : str ) -> Dict:
pass
def _snake_case ( self : Optional[int] ) -> Optional[Any]:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Optional[int] ) -> int:
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : Any ) -> Optional[int]:
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def _snake_case ( self : int ) -> Optional[int]:
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : int ) -> str:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = ViTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    """Load the standard COCO cats fixture image used across vision tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self : Dict ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _snake_case ( self : Optional[int] ) -> List[str]:
__magic_name__ = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__lowerCamelCase )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ = model(**__lowerCamelCase )
# verify the logits
__magic_name__ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__magic_name__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
def _snake_case ( self : int ) -> Any:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__magic_name__ = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__lowerCamelCase )
__magic_name__ = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_8_0 )
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=__lowerCamelCase , return_tensors="pt" )
__magic_name__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__magic_name__ = model(__lowerCamelCase , interpolate_pos_encoding=__lowerCamelCase )
# verify the logits
__magic_name__ = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , __lowerCamelCase )
__magic_name__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _snake_case ( self : Any ) -> List[Any]:
__magic_name__ = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=__lowerCamelCase , return_tensors="pt" )
__magic_name__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__magic_name__ = model(__lowerCamelCase )
| 468 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and
    prefix-sum queries in O(log n)."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from an array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the original array from the tree in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value at index in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the value at index in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the half-open prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum over the half-open range [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i such that prefix(i + 1) <= value, or -1 if none."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
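# Hedged usage sketch (editor addition): prefix() is exclusive of its argument
# and query(left, right) sums the half-open range [left, right).
def _example() -> None:
    tree = FenwickTree([1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    assert tree.query(1, 4) == 2 + 3 + 4
    tree.add(2, 10)  # the underlying array becomes [1, 2, 13, 4, 5]
    assert tree.get_array() == [1, 2, 13, 4, 5]
    assert tree.query(0, 5) == 25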
| 468 | 1 |
"""simple docstring"""
import math
def proth(number: int) -> int:
    """Return the nth Proth number (OEIS A080075): integers of the form
    k * 2**n + 1 with odd k < 2**n."""
    if not isinstance(number, int):
        error_message = f'Input value of [number={number}] must be an integer'
        raise TypeError(error_message)

    if number < 1:
        error_message = f'Input value of [number={number}] must be > 0'
        raise ValueError(error_message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Each "block" doubles the count of new Proth numbers it contributes.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
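# Hedged worked example (editor addition): the first ten Proth numbers
# (OEIS A080075) should come out as below.
assert [proth(n) for n in range(1, 11)] == [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]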
| 277 | """simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 277 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 714 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Optional[int] = StableUnCLIPPipeline
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_UpperCamelCase : Any = False
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ : Any = 32
a__ : int = embedder_hidden_size
# prior components
torch.manual_seed(0 )
a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a__ : Optional[Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
a__ : int = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
torch.manual_seed(0 )
a__ : str = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a__ : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
a__ : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
torch.manual_seed(0 )
a__ : Tuple = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a__ : Optional[int] = AutoencoderKL()
a__ : Any = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
"""simple docstring"""
if str(snake_case ).startswith("mps" ):
a__ : Union[str, Any] = torch.manual_seed(snake_case )
else:
a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
a__ : Any = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ : Dict = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : int = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
a__ : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case , snake_case )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
a__ : Union[str, Any] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : Union[str, Any] = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
a__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 629 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about (or, once the removal version is reached, refuse) deprecated
    arguments, attributes, or behaviors."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
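# Hedged usage sketch (editor addition): the names below are illustrative, not
# from this module. A function that renamed `old_arg` to `new_arg` could drain
# the deprecated keyword like this:
#
#   def run(new_arg=None, **kwargs):
#       old_arg = deprecate(
#           "old_arg", "0.99.0", "Use `new_arg` instead.", take_from=kwargs
#       )
#       new_arg = new_arg if new_arg is not None else old_arg
#
# With `take_from=kwargs` the helper pops the key, emits a FutureWarning, and
# returns the popped value; any leftover unexpected kwargs raise a TypeError.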
| 348 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Query the authenticated-user endpoint with a personal access token."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
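# Hedged usage note (editor addition): create a personal access token at
# https://github.com/settings/tokens and export it before running, e.g.
#
#   USER_TOKEN=<your-token> python fetch_github_info.py
#
# (the script file name is illustrative). The JSON response for the /user
# endpoint includes fields such as "login" and "id".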
| 367 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
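# Hedged usage sketch (editor addition): a typical fine-tuning invocation for
# this script. The model name and hyperparameters are illustrative only.
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_out \
#     --overwrite_output_dir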
| 708 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 0 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 53 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""", [str, list, dict] )
def a_ ( lowercase__ :Optional[int], lowercase__ :Any, lowercase__ :Optional[int] ):
import requests
monkeypatch.setattr(lowercase__, """request""", lowercase__ )
__lowerCamelCase = URL
if issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = url
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = [url]
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = {"""train""": url}
__lowerCamelCase = """dummy"""
__lowerCamelCase = """downloads"""
__lowerCamelCase = tmp_path
__lowerCamelCase = DownloadConfig(
cache_dir=os.path.join(lowercase__, lowercase__ ), use_etag=lowercase__, )
__lowerCamelCase = DownloadManager(dataset_name=lowercase__, download_config=lowercase__ )
__lowerCamelCase = dl_manager.download(lowercase__ )
__lowerCamelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase__, lowercase__ ):
__lowerCamelCase = [downloaded_paths]
__lowerCamelCase = [urls]
elif isinstance(lowercase__, lowercase__ ):
assert "train" in downloaded_paths.keys()
__lowerCamelCase = downloaded_paths.values()
__lowerCamelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase__, lowercase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__lowerCamelCase = Path(lowercase__ )
__lowerCamelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__lowerCamelCase = downloaded_path.read_text()
assert content == CONTENT
__lowerCamelCase = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__lowerCamelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""", [str, list, dict] )
def a_ ( lowercase__ :Dict, lowercase__ :Optional[Any], lowercase__ :Dict ):
__lowerCamelCase = str(lowercase__ )
if issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = filename
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = [filename]
elif issubclass(lowercase__, lowercase__ ):
__lowerCamelCase = {"""train""": filename}
__lowerCamelCase = """dummy"""
__lowerCamelCase = xz_file.parent
__lowerCamelCase = """extracted"""
__lowerCamelCase = DownloadConfig(
cache_dir=lowercase__, use_etag=lowercase__, )
__lowerCamelCase = DownloadManager(dataset_name=lowercase__, download_config=lowercase__ )
__lowerCamelCase = dl_manager.extract(lowercase__ )
__lowerCamelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase__, lowercase__ ):
__lowerCamelCase = [extracted_paths]
__lowerCamelCase = [paths]
elif isinstance(lowercase__, lowercase__ ):
assert "train" in extracted_paths.keys()
__lowerCamelCase = extracted_paths.values()
__lowerCamelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase__, lowercase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__lowerCamelCase = Path(lowercase__ )
__lowerCamelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase__, etag=lowercase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__lowerCamelCase = extracted_path.read_text()
__lowerCamelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
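# A minimal sketch of the public API exercised above (the URL is a placeholder;
# download() mirrors the shape of its input: str -> str, list -> list, dict -> dict):
#   dl_manager = DownloadManager()
#   local_path = dl_manager.download("https://example.com/file1.txt")
#   for path, file_obj in dl_manager.iter_archive(local_archive_path):
#       ...  # yields (member_path, file-like) pairs without extracting to disk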
| 281 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase__ = "hf-internal-testing/tiny-random-bert"
lowercase__ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowercase__ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existing_file(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # should return None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
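# Usage sketch for the two helpers tested above (the first call needs network
# access; paths land in the local Hugging Face cache):
#   config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
#   tok_path = get_file_from_repo("bert-base-cased", "tokenizer.json")  # None if the file is absent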
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
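# A quick illustration of the property above: it multiplies the convolutional
# strides, i.e. how many raw waveform samples collapse into one logit frame.
# With the default strides, 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320:
#   config = UniSpeechConfig()
#   assert config.inputs_to_logits_ratio == 320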
| 63 | 1 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
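# The comprehension above is the closed form h(n) = n * (2n - 1); for example:
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]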
| 70 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_A = """RegNetConfig"""
# Base docstring
_A = """facebook/regnet-y-040"""
_A = [1, 10_88, 7, 7]
# Image classification docstring
_A = """facebook/regnet-y-040"""
_A = """tabby, tabby cat"""
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: str = "relu", **kwargs, ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding='VALID', groups=groups, use_bias=False, name='convolution', )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self , lowercase , **lowercase ) -> Dict:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : str = config.num_channels
__SCREAMING_SNAKE_CASE : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _snake_case ( self , lowercase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.embedder(lowercase )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self , lowercase , lowercase = 2 , **lowercase ) -> str:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='''convolution''' )
__SCREAMING_SNAKE_CASE : Any = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _snake_case ( self , lowercase , lowercase = False ) -> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self , lowercase , lowercase , **lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='''pooler''' )
__SCREAMING_SNAKE_CASE : Dict = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _snake_case ( self , lowercase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = self.pooler(lowercase )
for layer_module in self.attention:
__SCREAMING_SNAKE_CASE : Optional[Any] = layer_module(lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ) -> int:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : int = in_channels != out_channels or stride != 1
__SCREAMING_SNAKE_CASE : Optional[int] = max(1 , out_channels // config.groups_width )
__SCREAMING_SNAKE_CASE : List[str] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='''layer.2''' ),
]
        self.activation = ACT2FN[config.hidden_act]
def _snake_case ( self , lowercase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = hidden_state
for layer_module in self.layers:
__SCREAMING_SNAKE_CASE : List[Any] = layer_module(lowercase )
__SCREAMING_SNAKE_CASE : Any = self.shortcut(lowercase )
hidden_state += residual
__SCREAMING_SNAKE_CASE : str = self.activation(lowercase )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : Tuple = in_channels != out_channels or stride != 1
__SCREAMING_SNAKE_CASE : Tuple = max(1 , out_channels // config.groups_width )
__SCREAMING_SNAKE_CASE : Tuple = (
TFRegNetShortCut(lowercase , stride=lowercase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
__SCREAMING_SNAKE_CASE : Optional[Any] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='''layer.3''' ),
]
        self.activation = ACT2FN[config.hidden_act]
def _snake_case ( self , lowercase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = hidden_state
for layer_module in self.layers:
__SCREAMING_SNAKE_CASE : Dict = layer_module(lowercase )
__SCREAMING_SNAKE_CASE : int = self.shortcut(lowercase )
hidden_state += residual
__SCREAMING_SNAKE_CASE : int = self.activation(lowercase )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ) -> int:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
__SCREAMING_SNAKE_CASE : Dict = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='''layers.0''' ),
*[layer(lowercase , lowercase , lowercase , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def _snake_case ( self , lowercase ) -> List[str]:
'''simple docstring'''
for layer_module in self.layers:
__SCREAMING_SNAKE_CASE : int = layer_module(lowercase )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , lowercase , **lowercase ) -> Tuple:
'''simple docstring'''
super().__init__(**lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
__SCREAMING_SNAKE_CASE : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=f"""stages.{i+1}""" ) )
def _snake_case ( self , lowercase , lowercase = False , lowercase = True ) -> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__SCREAMING_SNAKE_CASE : List[Any] = hidden_states + (hidden_state,)
__SCREAMING_SNAKE_CASE : Optional[int] = stage_module(lowercase )
if output_hidden_states:
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False, ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    @property
    def input_signature(self):
        # (property name assumed; it returns the serving signature spec)
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
_A = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_A = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False, ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
    ''',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
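# A minimal inference sketch for the classification model above (assumes the
# "facebook/regnet-y-040" checkpoint named in the docstrings and a PIL `image`):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(image, return_tensors="tf")
#   logits = model(**inputs).logits
#   predicted_label = model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]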
| 158 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
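# For orientation: is_small_dataset(size) is True only when a positive
# IN_MEMORY_MAX_SIZE is configured and size is strictly below it. Sketch:
#   datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20
#   assert is_small_dataset(400 * 2**20)
#   assert not is_small_dataset(600 * 2**20)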
| 703 | '''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
    tgt = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
    tgt = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
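# Direct usage sketch for calculate_rouge itself (aggregated scores come back
# keyed by the requested metrics):
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])
#   assert set(scores) == {"rouge2", "rougeL"}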
| 30 | 0 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
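    # Note: the scan above assumes `nums` is sorted in ascending order; on
    # unsorted input it can miss valid pairs. Another sorted example:
    assert two_pointer([2, 7, 11, 15], 18) == [1, 2]  # 7 + 11 == 18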
| 395 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
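# The plain recursion above revisits subproblems and is exponential in the number
# of items; a standard refinement memoises on (index, remaining weight). A sketch
# (not part of the original module; same inputs, same result):
from functools import lru_cache


def knapsack_memo(weights: list, values: list, number_of_items: int, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(index: int, remaining: int) -> int:
        if index == number_of_items:
            return 0
        skip = best(index + 1, remaining)
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(index + 1, remaining - weights[index])
        return max(skip, take)

    return best(0, max_weight)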
| 395 | 1 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
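# A short usage sketch for the dispatcher above (requires scikit-learn/scipy;
# the arrays are illustrative):
#   import numpy as np
#   preds, labels = np.array([1, 0, 1, 1]), np.array([1, 1, 1, 0])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": ..., "f1": ..., "acc_and_f1": ...}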
| 701 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters')
        mel_2 = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters')
        mel_2 = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize the feature extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
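# A standalone usage sketch for the extractor under test: a 1-D waveform at
# 44.1 kHz in, a (batch, channels, time_frames, feature_size) spectrogram out:
#   extractor = TvltFeatureExtractor()
#   waveform = np.asarray(floats_list((1, 44100))[0])
#   values = extractor(waveform, sampling_rate=44100, return_tensors="np").audio_values
#   assert values.ndim == 4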
| 586 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(F"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost to obfuscation; "VideoMAEImageProcessor"
    # is an assumption based on the VideoMAE/ViViT-style video API implemented below.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
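# A minimal preprocessing sketch (a video is a list of frames; random frames
# used purely for illustration, and the class name above is the assumed one):
#   import numpy as np
#   frames = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   processor = VideoMAEImageProcessor()
#   pixel_values = processor(frames, return_tensors="np").pixel_values
#   # -> shape (1, 8, 3, 224, 224): batch, frames, channels, height, width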
| 500 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self) -> datasets.DatasetInfo:
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        """simple docstring"""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self) -> datasets.DatasetInfo:
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        """simple docstring"""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        """simple docstring"""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    '''simple docstring'''
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


def get_test_nested_examples():
    '''simple docstring'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        """simple docstring"""
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        """simple docstring"""
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset

    @require_beam
    def test_no_beam_options(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        """simple docstring"""
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
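
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original tests): what
# `_build_pcollection` boils down to, as a standalone pipeline. Assumes
# `apache-beam` is installed; DirectRunner executes in-process.
def _demo_beam_pipeline():
    import apache_beam as beam

    examples = [(i, {"content": c}) for i, c in enumerate(["foo", "bar", "foobar"])]
    with beam.Pipeline(runner="DirectRunner") as pipeline:
        pipeline | "Load Examples" >> beam.Create(examples) | "Print" >> beam.Map(print)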
| 210 | 0 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """simple docstring"""
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    titles = soup.find_all('td', attrs='titleColumn')
    ratings = soup.find_all('td', class_='ratingColumn imdbRating')
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """simple docstring"""
    movies = get_imdb_top_250_movies()
    with open(filename, 'w', newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(['Movie title', 'IMDb rating'])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
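
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): the CSV-writing path
# exercised offline, with a hand-made ratings dict instead of a live scrape.
# The two titles and ratings below are placeholders for demonstration only.
def _demo_write_movies_offline(filename: str = "demo_movies.csv") -> None:
    movies = {"The Shawshank Redemption": 9.2, "The Godfather": 9.1}
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])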
| 709 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """simple docstring"""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """simple docstring"""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """simple docstring"""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
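
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): a quick convergence
# check of `area_under_curve_estimator` on a known integral, since the
# integral of x**2 over [0, 1] is exactly 1/3.
def _demo_known_integral(iterations: int = 100_000) -> None:
    estimate = area_under_curve_estimator(iterations, lambda x: x * x, 0.0, 1.0)
    print(f"Estimated integral of x**2 over [0, 1]: {estimate} (expected ~0.3333)")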
| 318 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = 'mobilenet_v2'

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
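
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): a hedged usage sketch
# of the config class defined above; the width multiplier and image size are
# arbitrary demonstration values.
if __name__ == "__main__":
    cfg = MobileNetV2Config(depth_multiplier=0.35, image_size=96)
    print(cfg.model_type)                        # "mobilenet_v2"
    print(cfg.depth_multiplier, cfg.image_size)  # 0.35 96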
| 205 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    '''simple docstring'''
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
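
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): how the guard is
# applied. When accelerate is missing, too old, or no `_hf_hook` is attached,
# the wrapped method falls through to the original implementation.
class _DemoModel:
    @apply_forward_hook
    def forward(self, x):
        return x * 2


if __name__ == "__main__":
    print(_DemoModel().forward(3))  # 6 — no hook attached, plain call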
| 205 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
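
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): a hedged usage sketch,
# assuming network access to the Hugging Face Hub to fetch the tokenizer files.
if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    enc = tok("hello world")
    print(enc["input_ids"])  # [101, 7592, 2088, 102] -> [CLS] hello world [SEP]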
| 708 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
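
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): spot-checks against
# the well-known sequence. F(12) = 144 is the first 3-digit Fibonacci number.
def _demo_fibonacci_checks() -> None:
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(3) == 12
    print("fibonacci spot-checks passed")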
| 600 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    '''simple docstring'''
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.')
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    '''simple docstring'''
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'''The solutions are: {solution_1} and {solution_2}''')
if __name__ == "__main__":
main()
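
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): a worked complex case.
# x**2 + 2x + 5 = 0 has discriminant -16, so cmath.sqrt yields roots -1 ± 2j.
def _demo_complex_roots() -> None:
    print(quadratic_roots(a=1, b=2, c=5))  # ((-1+2j), (-1-2j))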
| 464 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCamelCase = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCamelCase = get_tests_dir('fixtures/vocab.json')
lowerCamelCase = get_tests_dir('fixtures')
class A ( unittest.TestCase ):
UpperCamelCase__ : Dict =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def lowerCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict =0
def lowerCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : List[Any] =AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : int =WavaVecaConfig()
_lowerCamelCase : Dict =AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
_lowerCamelCase : Any =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
copyfile(lowercase_ , os.path.join(lowercase_ , 'vocab.json' ) )
_lowerCamelCase : Union[str, Any] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[Any] =WavaVecaFeatureExtractor()
_lowerCamelCase : List[str] =AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
_lowerCamelCase : str =WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_ , lowercase_ ) , 'r' ) as f:
_lowerCamelCase : Optional[int] =json.load(lowercase_ )
config_dict.pop('processor_class' )
with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f:
f.write(json.dumps(lowercase_ ) )
_lowerCamelCase : Optional[int] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[Any] =WavaVecaFeatureExtractor()
_lowerCamelCase : Tuple =AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
_lowerCamelCase : Dict =WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_ , lowercase_ ) , 'r' ) as f:
_lowerCamelCase : Union[str, Any] =json.load(lowercase_ )
config_dict.pop('processor_class' )
with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f:
f.write(json.dumps(lowercase_ ) )
_lowerCamelCase : Optional[int] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[Any] =WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(lowercase_ )
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(lowercase_ , lowercase_ ) , 'w' ) as f:
f.write('{}' )
_lowerCamelCase : int =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(lowercase_ ):
_lowerCamelCase : int =AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
_lowerCamelCase : Union[str, Any] =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
_lowerCamelCase : List[str] =AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
_lowerCamelCase : int =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
_lowerCamelCase : Optional[int] =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_lowerCamelCase : int =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
_lowerCamelCase : Optional[int] =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoProcessor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : str =CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : str =os.path.join(lowercase_ , 'vocab.txt' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : List[Any] =CustomTokenizer(lowercase_ )
_lowerCamelCase : Optional[int] =CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_ )
_lowerCamelCase : List[Any] =AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
class A ( UpperCamelCase_ ):
UpperCamelCase__ : Optional[Any] =False
class A ( UpperCamelCase_ ):
UpperCamelCase__ : int =False
class A ( UpperCamelCase_ ):
UpperCamelCase__ : Union[str, Any] ='AutoFeatureExtractor'
UpperCamelCase__ : str ='AutoTokenizer'
UpperCamelCase__ : List[Any] =False
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local classes.
_lowerCamelCase : int =AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_lowerCamelCase : int =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_lowerCamelCase : str =AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Any =AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class A ( unittest.TestCase ):
UpperCamelCase__ : List[Any] =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCamelCase ( cls : int ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def lowerCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
_lowerCamelCase : Tuple =WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , 'test-processor' ) , push_to_hub=lowercase_ , use_auth_token=self._token )
_lowerCamelCase : Union[str, Any] =WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : int =WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , 'test-processor-org' ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization='valid_org' , )
_lowerCamelCase : str =WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_lowerCamelCase : Optional[Any] =CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict =os.path.join(lowercase_ , 'vocab.txt' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCamelCase : Any =CustomTokenizer(lowercase_ )
_lowerCamelCase : List[Any] =CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
_lowerCamelCase : List[str] =Repository(lowercase_ , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(lowercase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowercase_ , 'tokenizer_config.json' ) ) as f:
_lowerCamelCase : Union[str, Any] =json.load(lowercase_ )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , 'custom_processing.py' ) ) )
repo.push_to_hub()
_lowerCamelCase : Tuple =AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
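
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original tests): the minimal
# AutoProcessor round trip that the first test exercises, assuming network
# access to the Hugging Face Hub.
def _demo_auto_processor() -> None:
    processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')
    print(type(processor).__name__)  # Wav2Vec2Processor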
| 464 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
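
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original script): a self-contained
# check of the q/k/v split performed by `read_in_q_k_v`, on a toy tensor with
# an assumed hidden size of 4. The fused (3h, h) matrix splits into three
# contiguous, equally sized query/key/value blocks.
def _demo_qkv_split(hidden: int = 4) -> None:
    in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : 2 * hidden, :]
    v = in_proj_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)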
| 715 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """simple docstring"""

    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs, ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
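
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): the `__call__`
# contract in plain onnxruntime terms. The model path and input shape below
# are hypothetical placeholders and must match an actual exported model.
def _demo_ort_call(model_path: str = "model.onnx") -> None:
    sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    input_name = sess.get_inputs()[0].name
    dummy = np.zeros((1, 3), dtype=np.float32)  # shape must match the model
    print(sess.run(None, {input_name: dummy}))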
| 248 | 0 |
'''simple docstring'''
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self) -> None:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self) -> None:
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)

| 90 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
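
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): a hedged usage sketch
# of the config class defined above; the reduced sizes are arbitrary values
# chosen only for demonstration.
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig(hidden_size=256, num_hidden_layers=4)
    print(config.model_type)  # "roberta-prelayernorm"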
| 503 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
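
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original file): the lazy-import idea
# in miniature. `_LazyModule` defers the real import until attribute access;
# the toy proxy below (illustrative, not the transformers implementation)
# shows the same mechanism.
def _demo_lazy_proxy() -> None:
    import importlib

    class _LazyProxy:
        def __init__(self, name):
            self._name, self._mod = name, None

        def __getattr__(self, attr):
            if self._mod is None:
                self._mod = importlib.import_module(self._name)
            return getattr(self._mod, attr)

    json_proxy = _LazyProxy("json")
    print(json_proxy.dumps({"ok": True}))  # the real import happens on this line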
| 53 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
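
# ---------------------------------------------------------------------------
# Illustrative addendum (not part of the original tests): what `replicate` and
# `shard` do to the inputs above, shown on toy arrays. Assumes JAX and Flax
# are installed; on a single-device host, device_count() is 1.
def _demo_replicate_and_shard() -> None:
    params = {"w": jnp.ones((2, 2))}
    replicated = replicate(params)                  # adds a leading device axis
    batch = jnp.zeros((jax.device_count() * 2, 3))
    sharded = shard(batch)                          # (n_devices, per_device, 3)
    print(replicated["w"].shape, sharded.shape)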
| 53 | 1 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 638 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    '''simple docstring'''
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f"{solution() = }")
| 638 | 1 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
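

# Minimal usage sketch (not part of the original file; commented out to keep the
# module import side-effect free): extract features from one second of synthetic
# mono audio at the default 44.1 kHz sampling rate.
#
#   extractor = TvltFeatureExtractor()
#   waveform = np.random.randn(44100).astype(np.float32)
#   inputs = extractor(waveform, sampling_rate=44100, return_tensors="np")
#   print(inputs["audio_values"].shape, inputs["audio_mask"].shape)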
| 668 | """simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibyte_character_encoding(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
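
    # Note on the expected ids in the test above: ByT5 token ids are the raw UTF-8
    # bytes shifted by 3 to make room for the special tokens (pad=0, eos=1, unk=2),
    # e.g. ord("U") = 85 -> 88, and the euro sign's bytes 226/130/172 -> 229/133/175.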
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    # This tokenizer has no vocab, so the common ids-setter test needs its own implementation
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 668 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
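

# Minimal usage sketch (not part of the original file): `attribute_map` above makes
# the generic config names resolve to the Pegasus-specific ones.
#
#   config = PegasusConfig()
#   assert config.hidden_size == config.d_model == 1024
#   assert config.num_attention_heads == config.encoder_attention_heads == 16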
| 95 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 373 | 0 |
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below `n`."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
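

# An equivalent closed form via inclusion-exclusion (a sketch, not part of the
# original solution): the sum of multiples of k below n is k * m * (m + 1) / 2
# with m = (n - 1) // k, so the answer is S(3) + S(5) - S(15).
def solution_closed_form(n: int = 1000) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)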
if __name__ == "__main__":
print(F"{solution() = }")
| 700 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 583 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
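
# Note on the mappings above: a "*" stands for a layer index. When a fairseq key
# matches the text around the "*", the index recovered from the key is substituted
# into the corresponding transformers key (see recursively_load_weights below).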
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('.*.')
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 3 |
class MaxFenwickTree:
    def __init__(self, size):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        return (index & (index + 1)) - 1

    def update(self, index, value):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute the maximum over the covered range
                # [current_left_border, index]. (Restored logic; the original
                # line here was garbled by renaming.)
                self.tree[index] = max(self.arr[index], self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left, right):
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
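

# Example usage (a sketch): point updates with range-maximum queries; note that
# `query`'s right bound is exclusive.
#
#   tree = MaxFenwickTree(5)
#   tree.update(2, 10)
#   tree.update(4, 7)
#   assert tree.query(0, 5) == 10
#   assert tree.query(3, 5) == 7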
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190 | 0 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
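

# Worked example: find_pattern("abr", "abracadabra") returns 2, because the
# z-array of "abr" + "abracadabra" reaches values >= len("abr") exactly at the
# two occurrences of "abr" in the text (offsets 0 and 7).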
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='''cpu''')['''model''']

    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''', '''.q_proj.''')
            k_name = key.replace('''.qkv_proj.''', '''.k_proj.''')
            v_name = key.replace('''.qkv_proj.''', '''.v_proj.''')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    sd = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 690 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError("""args.model_type should be \"bert\".""")
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowerCAmelCase : Union[str, Any] =state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__lowerCAmelCase : Dict =state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
__lowerCAmelCase : Dict =0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
__lowerCAmelCase : Dict =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__lowerCAmelCase : int =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__lowerCAmelCase : Optional[int] =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__lowerCAmelCase : int =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__lowerCAmelCase : List[Any] =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__lowerCAmelCase : Optional[int] =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__lowerCAmelCase : str =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__lowerCAmelCase : List[str] =state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__lowerCAmelCase : Union[str, Any] =state_dict["""cls.predictions.decoder.weight"""]
__lowerCAmelCase : Union[str, Any] =state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase : List[str] =state_dict[F"""cls.predictions.transform.dense.{w}"""]
__lowerCAmelCase : int =state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 359 | """simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            """<unk>""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """unwanted""",
            """wa""",
            """un""",
            """running""",
            """,""",
            """low""",
            """l""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = """<unk> UNwanted , running"""
        output_text = """<unk> unwanted, running"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("""<unk> UNwanted , running""")
        self.assertListEqual(tokens, ["""<unk>""", """unwanted""", """,""", """running"""])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how  \n Are yoU ?  """), ["""hello""", """!""", """how""", """are""", """you""", """?"""])
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how  \n Are yoU ?  """), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        tokens_out = [
            """Hello""",
            """(""",
            """bracket""",
            """)""",
            """and""",
            """side""",
            """@-@""",
            """scrolled""",
            """[""",
            """and""",
            """]""",
            """Henry""",
            """'s""",
            """$""",
            """5""",
            """@,@""",
            """000""",
            """with""",
            """3""",
            """@.@""",
            """34""",
            """m""",
            """.""",
            """What""",
            """'s""",
            """up""",
            """!""",
            """?""",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["""new1""", """new2"""])
        tokenizer.move_added_token("""new1""", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1"""), [1])
        self.assertEqual(tokenizer.decode([1]), """new1""")
| 359 | 1 |
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCAmelCase : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        '''In which compute environment are you running?''', ['''This machine''', '''AWS (Amazon SageMaker)'''], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('''config''', description=description)
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''', description=description)

    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ), )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('''.json'''):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'''accelerate configuration saved at {config_file}''')
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''', CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, '''test_file.py''')
    with open(tmp_file_path, '''w''') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 694 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turnaround time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that is not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process: turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
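

def response_ratio(burst: int, arrival: int, now: int) -> float:
    """The HRRN priority rule used above, in isolation (a minimal sketch;
    this helper is illustrative and not part of the original module)."""
    # priority = (waiting time + burst time) / burst time
    return (burst + (now - arrival)) / burst


# A process that arrived at t=1 with burst 3, evaluated at t=4, scores
# (3 + 3) / 3 = 2.0 and beats a just-arrived process, whose ratio is exactly 1.0.
assert response_ratio(burst=3, arrival=1, now=4) == 2.0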
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
F"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(F"average waiting time : {mean(waiting_time):.5f}")
print(F"average turn around time : {mean(turn_around_time):.5f}")
| 385 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
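

# Behaviour sketch (hedged): with the lazy module installed in sys.modules,
# importing the package stays cheap; the torch-backed submodule is only
# imported on first attribute access, e.g.
#
#   from transformers.models import ernie
#   ernie.ErnieConfig   # resolving the attribute triggers the real import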
| 385 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures/dummy-config.json")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase = 0
def _UpperCamelCase ( self : Optional[int] ) -> Dict:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
UpperCAmelCase = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str ) -> int:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str ) -> Any:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase = AutoConfig.for_model("roberta" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : int ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "fake-roberta" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def test_register(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase = AutoConfig.from_pretrained("bert-base" )
def _UpperCamelCase ( self : int ) -> int:
with self.assertRaisesRegex(
lowerCAmelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" )
def _UpperCamelCase ( self : Any ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
    def test_new_model_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
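

# Standalone distillation of the register/round-trip pattern exercised above
# (a sketch; `ToyConfig` and the /tmp path are illustrative):
#
#   from transformers import AutoConfig, PretrainedConfig
#
#   class ToyConfig(PretrainedConfig):
#       model_type = "toy"
#
#   AutoConfig.register("toy", ToyConfig)
#   ToyConfig().save_pretrained("/tmp/toy")   # writes /tmp/toy/config.json
#   assert isinstance(AutoConfig.from_pretrained("/tmp/toy"), ToyConfig)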
| 709 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Extract the GraphQL user-profile dict from an inline <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Greedy APPROX-VERTEX-COVER: keep both endpoints of arbitrarily chosen edges."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples for every edge in the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 54 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
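
    # Sanity check (a sketch): with dimensions [10, 20, 30] there is a single
    # product, A1(10x20) @ A2(20x30), costing 10 * 20 * 30 = 6000 scalar
    # multiplications, so the DP table must report exactly that.
    m, _ = matrix_chain_order([10, 20, 30])
    assert m[1][2] == 6000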
| 54 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
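

# Migration sketch (hedged): new code should construct the processor class
# directly instead of this deprecated alias, e.g.
#
#   from transformers import SegformerImageProcessor
#   processor = SegformerImageProcessor.from_pretrained(
#       "nvidia/segformer-b0-finetuned-ade-512-512"  # illustrative checkpoint
#   )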
| 720 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
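
    # Note on the expectations above: pieces such as "<0xC3>" and "<0xA9>" are
    # SentencePiece byte-fallback tokens. "é" has no entry in the test model's
    # vocabulary, so it is emitted as its raw UTF-8 bytes (0xC3 0xA9) and
    # reassembled into the original character on decode.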
| 60 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least n for which the fill-count function first exceeds
    one million, for red blocks of at least `min_block_length` squares
    (Project Euler problem 115)."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
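
    # Cross-check from the problem statement (hedged): for blocks of minimum
    # length 3, the fill-count function first exceeds one million at n = 30.
    assert solution(3) == 30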
| 535 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
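

# Example of the split (a sketch): camel_case_split("TFBertModel") returns
# ["TF", "Bert", "Model"], which is what lets the lookup below strip one
# trailing word at a time when resolving a class name to a model prefix.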
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The "Model" column must also fit the longest model name.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite) | 224 |
from __future__ import annotations
class lowerCAmelCase__ :
def __init__( self : Dict , __UpperCamelCase : list[list[int]] ) -> List[Any]:
A = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.' )
if len(__UpperCamelCase ) != 0:
A = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__UpperCamelCase ) != cols:
raise error
for value in row:
if not isinstance(__UpperCamelCase , (int, float) ):
raise error
A = rows
else:
A = []
def __UpperCamelCase ( self : Tuple ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def __UpperCamelCase ( self : Optional[Any] ) -> int:
return len(self.rows )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> int:
return len(self.rows[0] )
@property
def __UpperCamelCase ( self : str ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Optional[int] ) -> bool:
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Optional[Any] ) -> Matrix:
A = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def __UpperCamelCase ( self : List[str] ) -> bool:
return bool(self.determinant() )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : int ) -> int:
A = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__UpperCamelCase ).determinant()
def __UpperCamelCase ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : int ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(__UpperCamelCase , __UpperCamelCase )
return -1 * self.get_minor(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : List[Any] ) -> Matrix:
return Matrix(
[
[self.get_minor(__UpperCamelCase , __UpperCamelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def __UpperCamelCase ( self : str ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def __UpperCamelCase ( self : Optional[int] ) -> Matrix:
A = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__UpperCamelCase )
def __UpperCamelCase ( self : Dict ) -> Matrix:
A = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Dict ) -> str:
return str(self.rows )
def __str__( self : List[str] ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(__UpperCamelCase ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : list[int] , __UpperCamelCase : int | None = None ) -> None:
A = TypeError('Row must be a list containing all ints and/or floats' )
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise type_error
for value in row:
if not isinstance(__UpperCamelCase , (int, float) ):
raise type_error
if len(__UpperCamelCase ) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix' )
if position is None:
self.rows.append(__UpperCamelCase )
else:
A = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Any , __UpperCamelCase : list[int] , __UpperCamelCase : int | None = None ) -> None:
A = TypeError(
'Column must be a list containing all ints and/or floats' )
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise type_error
for value in column:
if not isinstance(__UpperCamelCase , (int, float) ):
raise type_error
if len(__UpperCamelCase ) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix' )
if position is None:
A = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
A = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Tuple , __UpperCamelCase : object ) -> bool:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Optional[int] , __UpperCamelCase : object ) -> bool:
return not self == other
def __neg__( self : List[str] ) -> Matrix:
return self * -1
def __add__( self : str , __UpperCamelCase : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Dict , __UpperCamelCase : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : str , __UpperCamelCase : Matrix | int | float ) -> Matrix:
if isinstance(__UpperCamelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second' )
return Matrix(
[
[Matrix.dot_product(__UpperCamelCase , __UpperCamelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix' )
def __pow__( self : Dict , __UpperCamelCase : int ) -> Matrix:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('A Matrix can only be raised to the power of an int' )
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power' )
A = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : List[Any] , __UpperCamelCase : list[int] , __UpperCamelCase : list[int] ) -> int:
return sum(row[i] * column[i] for i in range(len(__UpperCamelCase ) ) )
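

# Usage sketch for the class above (a hand check; the method names follow the
# class's own internal call sites: determinant, identity, inverse, ...):
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()         # -> -2
#   m * m.identity() == m   # True: multiplying by the identity is a no-op
#   (m + m) == m * 2        # True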
if __name__ == "__main__":
import doctest
doctest.testmod() | 224 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
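

# Quick illustration (a sketch): with RUN_SLOW=yes exported,
# parse_flag_from_env("RUN_SLOW", default=False) returns a truthy value
# (strtobool yields 1/0 rather than True/False), while an unset key falls
# back to `default` through the KeyError branch.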
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
SCREAMING_SNAKE_CASE__ : Optional[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
SCREAMING_SNAKE_CASE__ : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
SCREAMING_SNAKE_CASE__ : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
SCREAMING_SNAKE_CASE__ : List[str] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
SCREAMING_SNAKE_CASE__ : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE__ : Tuple = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
__magic_name__ :Optional[Any] = unittest.skip('''test requires faiss''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
__magic_name__ :Union[str, Any] = unittest.skip('''test requires regex''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
__magic_name__ :Optional[int] = unittest.skip('''test requires elasticsearch''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
__magic_name__ :Tuple = unittest.skip('''test requires sqlalchemy''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
__magic_name__ :Optional[int] = unittest.skip('''test requires PyTorch''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.TF_AVAILABLE:
__magic_name__ :Tuple = unittest.skip('''test requires TensorFlow''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
__magic_name__ :Optional[int] = unittest.skip('''test requires JAX''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
__magic_name__ :Tuple = unittest.skip('''test requires Pillow''' )(snake_case )
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
def _require_spacy_model(snake_case ):
try:
import spacy # noqa F401
spacy.load(snake_case )
except ImportError:
return unittest.skip('''test requires spacy''' )(snake_case )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(snake_case ) )(snake_case )
else:
return test_case
return _require_spacy_model
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(snake_case )
else:
return test_case
def __lowercase ( snake_case ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(snake_case )
else:
return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def __lowercase ( *snake_case ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(snake_case ) and name.startswith('''test''' ):
for decorator in decorators:
__magic_name__ :List[Any] = decorator(snake_case )
setattr(cls, snake_case, snake_case )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(session, method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
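

# Usage sketch: simulate a network outage inside a test body.
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       with pytest.raises(requests.ConnectionError):
#           requests.get("https://huggingface.co")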
@contextmanager
def __lowercase ( *snake_case, **snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*snake_case, **snake_case ) as tmp_dir:
try:
os.chdir(snake_case )
yield
finally:
os.chdir(snake_case )
@contextmanager
def __lowercase ( ):
"""simple docstring"""
import gc
gc.collect()
__magic_name__ :List[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __lowercase ( ):
"""simple docstring"""
import gc
gc.collect()
__magic_name__ :Optional[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
return deepcopy(snake_case ).integers(0, 1_0_0, 1_0 ).tolist() == deepcopy(snake_case ).integers(0, 1_0_0, 1_0 ).tolist()
def __lowercase ( snake_case ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(snake_case, *snake_case, **snake_case ):
try:
return func(*snake_case, **snake_case )
except HTTPError as err:
if str(snake_case ).startswith('''500''' ) or str(snake_case ).startswith('''502''' ):
pytest.xfail(str(snake_case ) )
raise err
return decorator.decorator(_wrapper, snake_case )
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :str = returncode
__magic_name__ :Any = stdout
__magic_name__ :str = stderr
async def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
while True:
__magic_name__ :Optional[int] = await stream.readline()
if line:
callback(snake_case )
else:
break
async def __lowercase ( snake_case, snake_case=None, snake_case=None, snake_case=None, snake_case=False, snake_case=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''', ''' '''.join(snake_case ) )
__magic_name__ :Optional[int] = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=snake_case, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=snake_case, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__magic_name__ :Any = []
__magic_name__ :Optional[Any] = []
def tee(snake_case, snake_case, snake_case, snake_case="" ):
__magic_name__ :Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(snake_case )
if not quiet:
print(snake_case, snake_case, file=snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout, lambda snake_case : tee(snake_case, snake_case, sys.stdout, label='''stdout:''' ) ),
_read_stream(p.stderr, lambda snake_case : tee(snake_case, snake_case, sys.stderr, label='''stderr:''' ) ),
], timeout=snake_case, )
return _RunOutput(await p.wait(), snake_case, snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=None, snake_case=1_8_0, snake_case=False, snake_case=True ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = asyncio.get_event_loop()
__magic_name__ :List[Any] = loop.run_until_complete(
_stream_subprocess(snake_case, env=snake_case, stdin=snake_case, timeout=snake_case, quiet=snake_case, echo=snake_case ) )
__magic_name__ :str = ''' '''.join(snake_case )
if result.returncode > 0:
__magic_name__ :Tuple = '''\n'''.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :str = os.environ.get('''PYTEST_XDIST_WORKER''', '''gw0''' )
__magic_name__ :Union[str, Any] = re.sub(R'''^gw''', '''''', snake_case, 0, re.M )
return int(snake_case )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = 2_9_5_0_0
__magic_name__ :List[Any] = pytest_xdist_worker_id()
return port + uniq_delta
| 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
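

# Input format note: each CSV needs a header row. `label_column_id` selects the
# label column by position; every remaining column is treated as a text feature
# (one column yields single-sentence encoding, two yield sentence-pair encoding,
# per the len(features_name) branches above).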
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase: EvalPrediction ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__SCREAMING_SNAKE_CASE : List[Any] = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__SCREAMING_SNAKE_CASE : Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate()
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(_lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
    main()
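# Illustrative command line for this script; the file paths and model name below are
# placeholders, not values defined in this file:
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#     --output_dir ./text-classification --do_train --do_eval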
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name: str):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
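# Minimal sanity check for rename_key on a plain dict; a standalone illustration,
# not part of the conversion flow:
#
#   d = {"old": 1}
#   rename_key(d, "old", "new")
#   assert d == {"new": 1}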
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
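# Example invocation; the script file name and output path are placeholders:
#
#   python convert_upernet_convnext_to_pytorch.py \
#     --model_name upernet-convnext-tiny \
#     --pytorch_dump_folder_path ./upernet-convnext-tiny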
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific install hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
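# Example usage of the helpers above; requirement strings follow the pip format
# parsed by require_version:
#
#   require_version("numpy")                     # any installed version passes
#   require_version("tokenizers>=0.10.1,<0.11")  # range check, raises ImportError on mismatch
#   require_version_core("datasets>=1.0")        # same check, with the core install hint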
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
__A = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
__A = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
__A = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
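# Sanity check for precision_at_10: when the two sets of sentence vectors are
# identical, each row's nearest neighbour is its own index, so the score is 1.0.
#
#   vecs = np.random.rand(20, 8)
#   assert precision_at_10(vecs, vecs) == 1.0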
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
__A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
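# Typical usage; "roberta-base" is the canonical checkpoint listed in the maps above:
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tokenizer("Hello world", return_offsets_mapping=True)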
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, H_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
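# H_n = n * (2n - 1) starting at n = 0, so hexagonal_numbers(5) evaluates to [0, 1, 6, 15, 28].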
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
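# Hedged usage sketch; the pipeline is normally built through transformers.pipeline,
# and the OWL-ViT checkpoint below is one example of a compatible model:
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )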
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
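# These tests are collected by pytest; assuming the standard repository layout,
# they can be run from the project root with:
#
#   python -m pytest digital_image_processing/test_digital_image_processing.py -v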
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self, feature_size=80, sampling_rate=16_000, num_mel_bins=80, padding_value=0.0,
        do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray, input_length: int, normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True, padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
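# Hedged usage sketch with a synthetic waveform (1 second of random audio at 16 kHz):
#
#   extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16_000)
#   speech = np.random.randn(16_000).astype(np.float32)
#   inputs = extractor(speech, sampling_rate=16_000, return_tensors="pt")
#   # inputs.input_features has shape (1, num_frames, 80)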
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
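# Example invocation; the script file name and checkpoint path are placeholders for a
# locally downloaded TensorFlow checkpoint:
#
#   python convert_mobilenet_v1_checkpoint.py \
#     --model_name mobilenet_v1_1.0_224 \
#     --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#     --pytorch_dump_folder_path ./mobilenet_v1_1.0_224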
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING
def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new
    default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
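# Typical usage of this module inside the library:
#
#   logger = get_logger(__name__)
#   set_verbosity_info()      # or set_verbosity(logging.INFO)
#   logger.info("dataset loaded")
#   disable_progress_bar()    # silence tqdm output globally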
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
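
# --- Illustrative usage sketch (not part of the test file above) ---
# A minimal standalone version of the XSum summarization flow exercised by
# `test_pegasus_xsum_summary`; it assumes the same Hub checkpoints are reachable
# and downloads them on first run.
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

inputs = tokenizer(
    ["PG&E scheduled blackouts across California to reduce wildfire risk."],
    return_tensors="np",
    truncation=True,
    max_length=512,
)
summary_ids = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))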
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence:
    a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
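
# Sanity check (safe to run at import time): the recurrence reproduces the
# well-known opening terms of Sylvester's sequence.
assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]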
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap `feature_size` so log-mel spectrogram targets
                # are padded with the right number of mel bins.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
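
# --- Illustrative usage sketch ---
# Typical text-to-speech preprocessing with the processor above; the
# "microsoft/speecht5_tts" checkpoint name is this example's assumption.
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
tts_inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
print(tts_inputs["input_ids"].shape)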
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
UpperCAmelCase__ : Any = number_of_bytes // partitions
UpperCAmelCase__ : List[Any] = []
for i in range(__UpperCamelCase ):
UpperCAmelCase__ : List[Any] = i * bytes_per_partition + 1
UpperCAmelCase__ : Union[str, Any] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"{start_bytes}-{end_bytes}" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
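
# Worked example (safe to run at import time): 888 bytes across 4 partitions
# gives 222 bytes per range, with the remainder folded into the last one.
assert allocation_num(888, 4) == ["1-222", "223-444", "445-666", "667-888"]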
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right of the image so both spatial dimensions
        # become multiples of `size`, mirroring border pixels.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
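
# --- Illustrative usage sketch ---
# (The `Swin2SRImageProcessor` name above matches the Swin2SR processor this
# file appears to come from.) Padding a 3x17x15 channels-first image with
# pad_size=8 yields 3x24x16, i.e. the next multiples of 8 on each side.
image = np.random.randint(0, 256, (3, 17, 15), dtype=np.uint8)
image_processor = Swin2SRImageProcessor()
batch = image_processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 24, 16)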
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


# Note: @patch decorators apply bottom-up, so `file` receives the
# "builtins.open" mock and `sock` the "socket.socket" mock.
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== assertions =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Set up the optimizer and the learning rate scheduler."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
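
# --- Illustrative sketch of the right-padding trick in `_pad_tensors_to_max_len` ---
# (standalone, with hypothetical values)
import torch

tensor = torch.tensor([[5, 6, 7]])
max_length, pad_token_id = 6, 0
padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
padded[:, : tensor.shape[-1]] = tensor
print(padded)  # tensor([[5, 6, 7, 0, 0, 0]])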
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
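
# --- Illustrative usage sketch ---
# `attribute_map` lets the generic config names resolve to DistilBERT's own:
config = DistilBertConfig(n_layers=3)
print(config.num_hidden_layers)  # 3, resolved through attribute_map -> n_layers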
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and rewrite its state dict into the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight stored as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the metaseq weights into our OPT structure and save them."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
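
# --- Illustrative usage sketch ---
# Programmatic equivalent of the CLI above; both paths and the config name are
# placeholders for this example.
# convert_opt_checkpoint(
#     "path/to/metaseq/restored.pt",
#     "opt-hf-dump/",
#     config="facebook/opt-350m",
# )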
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase="pt" ):
'''simple docstring'''
_lowerCAmelCase : str = {'add_prefix_space': True} if isinstance(_lowerCamelCase , _lowerCamelCase ) and not line.startswith(' ' ) else {}
_lowerCAmelCase : List[str] = padding_side
return tokenizer(
[line] , max_length=_lowerCamelCase , padding='max_length' if pad_to_max_length else None , truncation=_lowerCamelCase , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase , **_lowerCamelCase , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase : str = input_ids.ne(_lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ,_A ,_A ,_A="train" ,_A=None ,_A=None ,_A=None ,_A="" ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = Path(_A ).joinpath(type_path + '.source' )
_lowerCAmelCase : Optional[int] = Path(_A ).joinpath(type_path + '.target' )
_lowerCAmelCase : List[Any] = self.get_char_lens(self.src_file )
_lowerCAmelCase : Tuple = max_source_length
_lowerCAmelCase : Union[str, Any] = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_lowerCAmelCase : Dict = tokenizer
_lowerCAmelCase : List[Any] = prefix
if n_obs is not None:
_lowerCAmelCase : int = self.src_lens[:n_obs]
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Any = tgt_lang
def __len__( self ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = index + 1 # linecache starts at 1
_lowerCAmelCase : Optional[int] = self.prefix + linecache.getline(str(self.src_file ) ,_A ).rstrip('\n' )
_lowerCAmelCase : Optional[int] = linecache.getline(str(self.tgt_file ) ,_A ).rstrip('\n' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_A ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCAmelCase : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_A ) else self.tokenizer
)
_lowerCAmelCase : Dict = self.tokenizer.generator if isinstance(self.tokenizer ,_A ) else self.tokenizer
_lowerCAmelCase : Union[str, Any] = encode_line(_A ,_A ,self.max_source_length ,'right' )
_lowerCAmelCase : Optional[int] = encode_line(_A ,_A ,self.max_target_length ,'right' )
_lowerCAmelCase : Tuple = source_inputs['input_ids'].squeeze()
_lowerCAmelCase : int = target_inputs['input_ids'].squeeze()
_lowerCAmelCase : Optional[Any] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
return [len(_A ) for x in Path(_A ).open().readlines()]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.stack([x['input_ids'] for x in batch] )
_lowerCAmelCase : List[Any] = torch.stack([x['attention_mask'] for x in batch] )
_lowerCAmelCase : Any = torch.stack([x['decoder_input_ids'] for x in batch] )
_lowerCAmelCase : List[str] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
_lowerCAmelCase : List[str] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
_lowerCAmelCase : int = trim_batch(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = trim_batch(_A ,_A ,attention_mask=_A )
_lowerCAmelCase : List[str] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_lowerCAmelCase = getLogger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_git_info()
save_json(_lowerCamelCase , os.path.join(_lowerCamelCase , 'git_log.json' ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=4 , **_lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase , 'w' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=_lowerCamelCase , **_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase ) as f:
return json.load(_lowerCamelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = git.Repo(search_parent_directories=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = {
'repo_id': str(_lowerCamelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return list(map(_lowerCamelCase , _lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase , 'wb' ) as f:
return pickle.dump(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def remove_articles(_lowerCamelCase ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _lowerCamelCase )
def white_space_fix(_lowerCamelCase ):
return " ".join(text.split() )
def remove_punc(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = normalize_answer(_lowerCamelCase ).split()
_lowerCAmelCase : List[str] = normalize_answer(_lowerCamelCase ).split()
_lowerCAmelCase : Optional[Any] = Counter(_lowerCamelCase ) & Counter(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sum(common.values() )
if num_same == 0:
return 0
_lowerCAmelCase : Any = 1.0 * num_same / len(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = 1.0 * num_same / len(_lowerCamelCase )
_lowerCAmelCase : str = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = 0
for hypo, pred in zip(_lowerCamelCase , _lowerCamelCase ):
em += exact_match_score(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
em /= len(_lowerCamelCase )
return {"em": em}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCAmelCase : List[str] = 'dropout_rate'
for p in extra_params:
if getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not hasattr(_lowerCamelCase , _lowerCamelCase ) and not hasattr(_lowerCamelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowerCamelCase ) )
delattr(_lowerCamelCase , _lowerCamelCase )
continue
_lowerCAmelCase : Optional[Any] = p if hasattr(_lowerCamelCase , _lowerCamelCase ) else equivalent_param[p]
setattr(_lowerCamelCase , _lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
delattr(_lowerCamelCase , _lowerCamelCase )
return hparams, config
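
# --- Illustrative sanity checks for the squad-style metrics above ---
print(normalize_answer("The quick, brown fox!"))    # quick brown fox
print(f1_score("quick brown fox", "quick fox"))      # 0.8
print(calculate_exact_match(["Paris"], ["paris."]))  # {'em': 1.0}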
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
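

# --- Illustrative addition (not part of the original test file) ---
# A minimal sketch of the post-processing flow the last test exercises, as one
# might use it outside the unittest harness. The checkpoint name matches the
# tests above; the helper itself is an assumption for illustration only.
def _example_semantic_segmentation_postprocessing(image):
    model_name = "microsoft/beit-base-finetuned-ade-640-640"
    image_processor = BeitImageProcessor.from_pretrained(model_name)
    model = BeitForSemanticSegmentation.from_pretrained(model_name)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # PIL reports (width, height); post-processing expects (height, width)
    return image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[image.size[::-1]])[0]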
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    # input_shape on a non-first layer is ignored by Keras; kept for parity with the original
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
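
    # --- Illustrative addition (not part of the original script) ---
    # A minimal sketch of mapping the scaled predictions back to the original
    # price range; re-fitting an identical MinMaxScaler on the same column is
    # assumed to be acceptable here purely for demonstration.
    scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    pred_prices = scaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)
    print("First predicted window (original scale):", pred_prices[0])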
"""simple docstring"""
import random
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = num - 1
lowerCAmelCase : Any = 0
while s % 2 == 0:
lowerCAmelCase : List[Any] = s // 2
t += 1
for _ in range(5 ):
lowerCAmelCase : int = random.randrange(2 , num - 1 )
lowerCAmelCase : Tuple = pow(A_ , A_ , A_ )
if v != 1:
lowerCAmelCase : Tuple = 0
while v != (num - 1):
if i == t - 1:
return False
else:
lowerCAmelCase : List[Any] = i + 1
lowerCAmelCase : Dict = (v**2) % num
return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
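
    # --- Illustrative addition (not part of the original script) ---
    # Quick sanity checks, assuming rabin_miller's five rounds are adequate:
    assert not is_prime_low_num(1)
    assert is_prime_low_num(997)
    assert not is_prime_low_num(1024)
    print(("A smaller 128-bit prime:", generate_large_prime(128)))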
# Builds the custom C++/CUDA kernels for multi-scale deformable attention on first use.
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
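

# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of how the loader above might be used. Building the
# extension needs a CUDA toolchain and the kernel sources on disk, so the
# call is guarded instead of being executed unconditionally; this wrapper is
# an assumption, not transformers API.
def try_load_cuda_kernels():
    try:
        return load_cuda_kernels()
    except Exception as exc:  # e.g. missing compiler, CUDA, or kernel sources
        print(f"Could not build MultiScaleDeformableAttention: {exc}")
        return None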
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ : Any = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir)
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev)
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
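

# --- Illustrative addition (not part of the original script) ---
# A hypothetical invocation; the task name and all paths are placeholders,
# but the flags correspond to the dataclasses defined above:
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --max_seq_length 128 \
#       --do_train --do_eval \
#       --output_dir ./multiple_choice_out \
#       --overwrite_output_dir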
"""Find all anagrams of every word in a word list."""
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters, sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
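

# --- Illustrative addition (not part of the original script) ---
# A tiny self-contained check of the signature idea that does not depend on
# words.txt: two words are anagrams exactly when their signatures match.
def _example_signature_check():
    assert signature("listen") == signature("silent")
    assert signature("apple") != signature("pear")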
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1)
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device)

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1)
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state)[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim])
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim])

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim])

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim])

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
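

# --- Illustrative addition (not part of the original fixtures module) ---
# A hypothetical test showing how the fixtures above compose: pytest injects
# the generated script directory, which must contain the loading script.
def test_dataset_loading_script_dir_layout(dataset_loading_script_dir, dataset_loading_script_name):
    import os

    script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script_path)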
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
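

# --- Illustrative addition (not part of the original module) ---
# A small sanity check on compress_data alone, assuming the bit-string
# interface above: the output is itself a bit string and, since every parsed
# phrase is replaced by a lexicon id, it can only shrink or stay equal.
def _example_compress_data():
    bits = "11010010"
    compressed = compress_data(bits)
    assert set(compressed) <= {"0", "1"}
    assert len(compressed) <= len(bits)
    return compressed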
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
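

# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of the two classes in use: build a deliberately small
# config (the reduced sizes are assumptions for illustration) and ask the
# ONNX config for its dynamic input axes.
def _example_roberta_onnx_inputs():
    config = RobertaConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    onnx_config = RobertaOnnxConfig(config)
    return onnx_config.inputs  # OrderedDict mapping input names to dynamic axes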
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    # round the latent grid up so it fully covers the requested resolution
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
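
# Quick sanity check for the rounding above (a sketch; only runnable where this
# module's relative imports resolve): 768 maps straight onto the 96x96 latent
# grid, while 769 is rounded up to the next multiple of the scale factor.
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768) == (96, 96)
    assert downscale_height_and_width(769, 769) == (104, 104)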
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, hint, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler)

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
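
# A minimal usage sketch (not part of the original module; assumes the Hub
# checkpoint "microsoft/layoutxlm-base" and a local Tesseract install for OCR):
if __name__ == "__main__":
    from PIL import Image

    from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizer

    processor = LayoutXLMProcessor(
        image_processor=LayoutLMv2ImageProcessor(apply_ocr=True),
        tokenizer=LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base"),
    )
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    print(encoding.keys())  # input_ids, bbox, attention_mask, image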
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
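
# A short usage sketch (hypothetical input array; only runnable where the
# package-relative imports above resolve). Exercises the default
# shortest-edge resize + 224x224 center crop.
if __name__ == "__main__":
    processor = CLIPImageProcessor()
    dummy_image = (np.random.rand(480, 640, 3) * 255).astype("uint8")
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)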
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output of the semantic Stable Diffusion pipeline: the generated images and,
    per image, whether the safety checker flagged potentially unsafe content.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (an expression in x, given as a string) via the
    Newton-Raphson iteration x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (a value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) - 1 = 0 (i.e. x = e)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential root
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight
    ratio, splitting the last item if it does not fit entirely."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
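
    # A small worked example (hypothetical values/weights): the value/weight
    # ratios are 10, 8 and 7.5, so the greedy order is item 0, item 1, then
    # one third of item 2, for a total of 30 + 40 + 15 = 85.
    value = [30.0, 40.0, 45.0]
    weight = [3.0, 5.0, 6.0]
    max_value, fractions = fractional_knapsack(value, weight, capacity=10.0)
    print(f"max value: {max_value}, fractions: {fractions}")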
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Smallest non-negative x with x ≡ r1 (mod n1) and x ≡ r2 (mod n2),
    for coprime n1 and n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Multiplicative inverse of a modulo n (a and n coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
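
    # Worked example: x ≡ 1 (mod 5) and x ≡ 2 (mod 7) has the unique solution
    # 16 modulo 35 (16 % 5 == 1 and 16 % 7 == 2); both variants must agree.
    assert chinese_remainder_theorem(5, 1, 7, 2) == 16
    assert chinese_remainder_theorem2(5, 1, 7, 2) == 16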
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2): for each element, scan the rest of the array by index."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same quadratic idea, but iterating with enumerate and slices."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n): one right-to-left pass with a monotonically decreasing stack."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
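    # Sanity check: all three implementations must agree with `expect` above.
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect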
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid; with deriv=True, its derivative evaluated at an
    already-activated output (i.e. `value` is sigmoid(x))."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the one-neuron network outputs expected / 100."""
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
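
    # Stochastic sanity check (a sketch): with many propagations the output
    # should land near the target, e.g. forward_propagation(32, 450_000) ≈ 32.
    print(f"sanity: forward_propagation(32, 450_000) = {forward_propagation(32, 450_000)}")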
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type)
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type)
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
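
# Standalone usage sketch (not one of the tests above): construct the scheduler
# with the same defaults the tests exercise and inspect its timestep schedule.
if __name__ == "__main__":
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, beta_schedule="linear", solver_order=2)
    scheduler.set_timesteps(10)
    print(scheduler.timesteps)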
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder")

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0"))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class A_ ( tf.keras.layers.Layer ):
_UpperCAmelCase : List[Any] = RegNetConfig
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = config
__lowerCamelCase : Optional[int] = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE__ ,name='embedder')
__lowerCamelCase : Union[str, Any] = TFRegNetEncoder(SCREAMING_SNAKE_CASE__ ,name='encoder')
__lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler')
@unpack_inputs
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,):
__lowerCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = self.encoder(
SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = encoder_outputs[0]
__lowerCamelCase : int = self.pooler(SCREAMING_SNAKE_CASE__)
# Change to NCHW output format have uniformity in the modules
__lowerCamelCase : Union[str, Any] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2))
__lowerCamelCase : str = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowerCamelCase : Union[str, Any] = tuple([tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None, output_hidden_states: bool = None, return_dict: bool = None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
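
# Minimal smoke-test sketch (hypothetical small config; only runnable where the
# package-relative imports above resolve). Pixel values follow the documented
# NCHW contract.
if __name__ == "__main__":
    config = RegNetConfig(num_channels=3, embedding_size=32, hidden_sizes=[32, 64], depths=[2, 2], num_labels=10)
    model = TFRegNetForImageClassification(config)
    outputs = model(tf.random.uniform((1, 3, 224, 224)))
    print(outputs.logits.shape)  # (1, 10)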
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
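
# A minimal usage sketch (assumes the surrounding `datasets` package): feature
# schemas are declared as a mapping from column names to feature types.
if __name__ == "__main__":
    features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
    print(features)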
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
snake_case = HfArgumentParser((TrainingArguments,))
snake_case = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
snake_case = DummyDataset(dataset_length)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :EvalPrediction ) -> Dict:
_lowercase = list(range(len(snake_case__ ) ) )
_lowercase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
snake_case = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
snake_case = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
snake_case = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
snake_case = 2
snake_case = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
snake_case = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        snake_case = None
| 535 | 1 |
import os
import string
import sys
a_ : List[str] = 1 << 8
a_ : Union[str, Any] = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 2_7,
'up': 6_5 + ARROW_KEY_FLAG,
'down': 6_6 + ARROW_KEY_FLAG,
'right': 6_7 + ARROW_KEY_FLAG,
'left': 6_8 + ARROW_KEY_FLAG,
'mod_int': 9_1,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 5_0,
'delete': 5_1,
'pg_up': 5_3,
'pg_down': 5_4,
}
a_ : Optional[Any] = KEYMAP['up']
a_ : List[str] = KEYMAP['left']
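# The two aliases above are the mangled remains of
# ``KEYMAP["arrow_begin"] = KEYMAP["up"]`` and ``KEYMAP["arrow_end"] = KEYMAP["left"]``,
# which the second helper below (``get_character`` in the original source)
# still looks up. Arrow keys share byte values with "A"-"D" (65-68), so the
# ``1 << 8`` flag lifts them out of the single-byte range.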
if sys.platform == "win32":
a_ : List[Any] = []
a_ : Optional[int] = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(1_0):
a_ : Optional[Any] = ord(str(i))
def __lowercase( ):
"""simple docstring"""
if os.name == "nt":
import msvcrt
lowerCamelCase = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(UpperCAmelCase__ ) == 0:
# Read the keystroke
lowerCamelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCamelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCamelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(UpperCAmelCase__ )
if ord(UpperCAmelCase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCamelCase = chr(KEYMAP["esc"] )
except KeyError:
lowerCamelCase = cha[1]
else:
lowerCamelCase = ch.decode(UpperCAmelCase__ )
else:
lowerCamelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCamelCase = sys.stdin.fileno()
lowerCamelCase = termios.tcgetattr(UpperCAmelCase__ )
try:
tty.setraw(UpperCAmelCase__ )
lowerCamelCase = sys.stdin.read(1 )
finally:
termios.tcsetattr(UpperCAmelCase__ , termios.TCSADRAIN , UpperCAmelCase__ )
return ch
def __lowercase( ):
"""simple docstring"""
lowerCamelCase = get_raw_chars()
if ord(UpperCAmelCase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(UpperCAmelCase__ ) == KEYMAP["esc"]:
lowerCamelCase = get_raw_chars()
if ord(UpperCAmelCase__ ) == KEYMAP["mod_int"]:
lowerCamelCase = get_raw_chars()
if ord(UpperCAmelCase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(UpperCAmelCase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(UpperCAmelCase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
            return KEYMAP["undefined"]
| 623 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase):
"""simple docstring"""
_A = AudioLDMPipeline
_A = TEXT_TO_AUDIO_PARAMS
_A = TEXT_TO_AUDIO_BATCH_PARAMS
_A = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
])
def _a (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , )
lowerCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
lowerCamelCase = ClapTextModelWithProjection(__a )
lowerCamelCase = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
lowerCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , )
lowerCamelCase = SpeechTaHifiGan(__a )
lowerCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _a (self , __a , __a=0 ):
'''simple docstring'''
if str(__a ).startswith("mps" ):
lowerCamelCase = torch.manual_seed(__a )
else:
lowerCamelCase = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 2_56
lowerCamelCase = audio[:10]
lowerCamelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * [inputs["prompt"]]
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * [inputs.pop("prompt" )]
lowerCamelCase = audioldm_pipe.tokenizer(
__a , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
lowerCamelCase = text_inputs["input_ids"].to(__a )
lowerCamelCase = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase = F.normalize(__a , dim=-1 )
lowerCamelCase = prompt_embeds
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * ["this is a negative prompt"]
lowerCamelCase = negative_prompt
lowerCamelCase = 3 * [inputs["prompt"]]
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * [inputs.pop("prompt" )]
lowerCamelCase = []
for p in [prompt, negative_prompt]:
lowerCamelCase = audioldm_pipe.tokenizer(
__a , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
lowerCamelCase = text_inputs["input_ids"].to(__a )
lowerCamelCase = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase = F.normalize(__a , dim=-1 )
embeds.append(__a )
lowerCamelCase , lowerCamelCase = embeds
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = "egg cracking"
lowerCamelCase = audioldm_pipe(**__a , negative_prompt=__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 2_56
lowerCamelCase = audio[:10]
lowerCamelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCamelCase = 2
lowerCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
lowerCamelCase = 2
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
lowerCamelCase = 2
lowerCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = audioldm_pipe.vocoder.config.sampling_rate
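        # waveform length in samples == audio_length_in_s * sampling_rate, so
        # dividing the sample count by the vocoder rate below recovers the
        # requested duration in seconds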
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.016
lowerCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.032
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = ["hey"]
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase = output.audios.shape
assert audio_shape == (1, 2_56)
lowerCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCamelCase = SpeechTaHifiGan(__a ).to(__a )
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def _a (self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a )
def _a (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _a (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a )
@slow
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self , __a , __a="cpu" , __a=torch.floataa , __a=0 ):
'''simple docstring'''
lowerCamelCase = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase = np.random.RandomState(__a ).standard_normal((1, 8, 1_28, 16) )
lowerCamelCase = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _a (self ):
'''simple docstring'''
lowerCamelCase = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_inputs(__a )
lowerCamelCase = 25
lowerCamelCase = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 8_19_20
lowerCamelCase = audio[7_72_30:7_72_40]
lowerCamelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowerCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
lowerCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_inputs(__a )
lowerCamelCase = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 8_19_20
lowerCamelCase = audio[2_77_80:2_77_90]
lowerCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowerCamelCase = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
| 623 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase : List[Any] = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
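# Lazy-import pattern: the submodules listed above are only materialized on
# first attribute access (see the ``_LazyModule`` wiring at the bottom), and
# optional backends (vision, torch) are probed before their symbols are added.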
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
from __future__ import annotations
from collections.abc import Generator
def SCREAMING_SNAKE_CASE ( ) -> Generator[int, None, None]:
lowerCamelCase__ : dict[int, int] = {}
lowerCamelCase__ : Union[str, Any] = 2
while True:
lowerCamelCase__ : Optional[int] = factor_map.pop(_UpperCAmelCase , _UpperCAmelCase )
if factor:
lowerCamelCase__ : Optional[Any] = factor + prime
while x in factor_map:
x += factor
lowerCamelCase__ : Optional[Any] = factor
else:
lowerCamelCase__ : Union[str, Any] = prime
yield prime
prime += 1
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = 1e10 ) -> int:
lowerCamelCase__ : Tuple = sieve()
lowerCamelCase__ : Dict = 1
while True:
lowerCamelCase__ : List[Any] = next(_UpperCAmelCase )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime, as the remainder there will be 2.
next(_UpperCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
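    # Hedged sanity sketch (an addition, not part of the original solution):
    # the search above relies on the identity
    # ((p - 1) ** n + (p + 1) ** n) % p**2 == (2 * n * p) % p**2 for odd n,
    # which is why it can stop as soon as 2 * prime * n exceeds the limit.
    for _n, _p in zip(range(1, 10), [2, 3, 5, 7, 11, 13, 17, 19, 23]):
        if _n % 2 == 1:
            assert ((_p - 1) ** _n + (_p + 1) ** _n) % _p**2 == (2 * _n * _p) % _p**2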
| 188 | 0 |
'''simple docstring'''
def A__ ( numa : int , numb : int ):
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
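    # Brief illustration: two ints differ in sign exactly when the sign bit of
    # their XOR is set, i.e. when ``a ^ b`` is negative.
    assert A__(1, -1) is True
    assert A__(3, 7) is False
    assert A__(-5, -2) is False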
| 50 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = '''mra'''
def __init__( self : str , __lowerCamelCase : Tuple=5_0_2_6_5 , __lowerCamelCase : List[str]=7_6_8 , __lowerCamelCase : Union[str, Any]=1_2 , __lowerCamelCase : Optional[int]=1_2 , __lowerCamelCase : Union[str, Any]=3_0_7_2 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Union[str, Any]=5_1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : List[Any]="absolute" , __lowerCamelCase : int=4 , __lowerCamelCase : List[Any]="full" , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Any=0 , __lowerCamelCase : Dict=1 , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=2 , **__lowerCamelCase : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = block_per_row
_SCREAMING_SNAKE_CASE = approx_mode
_SCREAMING_SNAKE_CASE = initial_prior_first_n_blocks
_SCREAMING_SNAKE_CASE = initial_prior_diagonal_n_blocks
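# Hedged usage sketch (an addition; in the released library this class is
# exposed as ``transformers.MraConfig``, and the mangled names above are
# corpus obfuscation):
#
#     from transformers import MraConfig, MraModel
#     config = MraConfig(hidden_size=768, num_hidden_layers=12)
#     model = MraModel(config)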
| 418 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : Optional[Any] = {}
snake_case_ : Optional[Any] = job["""started_at"""]
snake_case_ : Tuple = job["""completed_at"""]
snake_case_ : Tuple = date_parser.parse(SCREAMING_SNAKE_CASE__ )
snake_case_ : Union[str, Any] = date_parser.parse(SCREAMING_SNAKE_CASE__ )
snake_case_ : Any = round((end_datetime - start_datetime).total_seconds() / 60.0 )
snake_case_ : int = start
snake_case_ : str = end
snake_case_ : Optional[Any] = duration_in_min
return job_info
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=None ):
"""simple docstring"""
snake_case_ : List[str] = None
if token is not None:
snake_case_ : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
snake_case_ : Optional[int] = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
snake_case_ : Optional[Any] = requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).json()
snake_case_ : Optional[int] = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(SCREAMING_SNAKE_CASE__ ) for job in result["""jobs"""]} )
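        # The jobs endpoint caps each response at 100 entries; work out how
        # many extra pages remain and fetch them via the ``page`` query
        # parameter below.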
snake_case_ : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ : Optional[int] = requests.get(url + f'&page={i + 2}' , headers=SCREAMING_SNAKE_CASE__ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(SCREAMING_SNAKE_CASE__ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
a_ = parser.parse_args()
a_ = get_job_time(args.workflow_run_id)
a_ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 48 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_UpperCAmelCase)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[int] = """rag"""
_A : Optional[Any] = True
def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(
bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
snake_case_ : List[Any] = kwargs.pop("""question_encoder""" )
snake_case_ : Tuple = question_encoder_config.pop("""model_type""" )
snake_case_ : List[str] = kwargs.pop("""generator""" )
snake_case_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
snake_case_ : int = reduce_loss
snake_case_ : Optional[int] = label_smoothing
snake_case_ : Dict = exclude_bos_score
snake_case_ : Union[str, Any] = do_marginalize
snake_case_ : Union[str, Any] = title_sep
snake_case_ : int = doc_sep
snake_case_ : int = n_docs
snake_case_ : List[str] = max_combined_length
snake_case_ : Tuple = dataset
snake_case_ : int = dataset_split
snake_case_ : str = index_name
snake_case_ : List[str] = retrieval_vector_size
snake_case_ : Dict = retrieval_batch_size
snake_case_ : str = passages_path
snake_case_ : Union[str, Any] = index_path
snake_case_ : Tuple = use_dummy_dataset
snake_case_ : Dict = output_retrieved
snake_case_ : str = do_deduplication
snake_case_ : Any = use_cache
if self.forced_eos_token_id is None:
snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ )
@classmethod
def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.question_encoder.to_dict()
snake_case_ : Dict = self.generator.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
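# Hedged usage sketch (an addition; released as ``transformers.RagConfig``).
# The classmethod above is ``from_question_encoder_generator_configs`` in the
# library, so a config is typically composed from two sub-configs:
#
#     from transformers import BartConfig, DPRConfig, RagConfig
#     config = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5
#     )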
| 48 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase__ = _symbol_database.Default()
lowerCAmelCase__ = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
lowerCAmelCase__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase__ = None
lowerCAmelCase__ = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
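    # The bare integers below are the obfuscated remnants of
    # ``_globals["..."]._serialized_start`` / ``_serialized_end`` assignments:
    # byte offsets of each message inside the serialized descriptor, of which
    # only the right-hand sides survived the renaming.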
lowerCAmelCase__ = 45
lowerCAmelCase__ = 1_581
lowerCAmelCase__ = 1_517
lowerCAmelCase__ = 1_570
lowerCAmelCase__ = 1_584
lowerCAmelCase__ = 1_793
lowerCAmelCase__ = 1_795
lowerCAmelCase__ = 1_916
lowerCAmelCase__ = 1_864
lowerCAmelCase__ = 1_905
lowerCAmelCase__ = 1_919
lowerCAmelCase__ = 2_429
lowerCAmelCase__ = 2_208
lowerCAmelCase__ = 2_418
lowerCAmelCase__ = 2_323
lowerCAmelCase__ = 2_407
# @@protoc_insertion_point(module_scope)
| 321 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class __a ( __snake_case ):
lowerCamelCase : Union[str, Any] ='open-llama'
def __init__( self , UpperCAmelCase=10_0000 , UpperCAmelCase=4096 , UpperCAmelCase=1_1008 , UpperCAmelCase=32 , UpperCAmelCase=32 , UpperCAmelCase="silu" , UpperCAmelCase=2048 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1E-6 , UpperCAmelCase=True , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = rms_norm_eps
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = kwargs.pop(
'''use_memorry_efficient_attention''' , UpperCAmelCase )
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_dropout_prob
lowerCAmelCase_ = use_stable_embedding
lowerCAmelCase_ = shared_input_output_embedding
lowerCAmelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase , )
def lowerCamelCase_ ( self ):
'''simple docstring'''
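        # When set, ``rope_scaling`` must look like
        # ``{"type": "linear" | "dynamic", "factor": <float greater than 1>}``;
        # the checks below enforce exactly that shape.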
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"""got {self.rope_scaling}""" )
lowerCAmelCase_ = self.rope_scaling.get('''type''' , UpperCAmelCase )
lowerCAmelCase_ = self.rope_scaling.get('''factor''' , UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(UpperCAmelCase , UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 552 | 0 |
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE__ = float(_A )
except ValueError:
raise ValueError('''Please enter a valid number''' )
SCREAMING_SNAKE_CASE__ = decimal - int(_A )
if fractional_part == 0:
return int(_A ), 1
else:
SCREAMING_SNAKE_CASE__ = len(str(_A ).split('''.''' )[1] )
SCREAMING_SNAKE_CASE__ = int(decimal * (10**number_of_frac_digits) )
SCREAMING_SNAKE_CASE__ = 10**number_of_frac_digits
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = denominator, numerator
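        # Reduce the fraction with the Euclidean algorithm:
        # gcd(a, b) == gcd(b, a % b), iterated until the remainder is zero.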
while True:
SCREAMING_SNAKE_CASE__ = dividend % divisor
if remainder == 0:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = divisor, remainder
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = numerator / divisor, denominator / divisor
return int(_A ), int(_A )
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(8_9.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 705 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = KandinskyImgaImgPipeline
a = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a = False
@property
def lowercase_ ( self : str ) -> List[str]:
return 32
@property
def lowercase_ ( self : Optional[int] ) -> int:
return 32
@property
def lowercase_ ( self : Union[str, Any] ) -> int:
return self.time_input_dim
@property
def lowercase_ ( self : List[str] ) -> int:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Union[str, Any] ) -> Any:
return 100
@property
def lowercase_ ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowercase_ ( self : List[Any] ) -> List[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE__ = MultilingualCLIP(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : str ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : Dict ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Tuple ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ = DDIMScheduler(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=0 ) -> str:
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCamelCase )
# create init_image
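        # (random CHW float tensor -> HWC uint8 array -> PIL image, upsampled
        # to the 256x256 resolution the pipeline expects)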
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
__lowerCamelCase , image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 472 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 42
__lowerCamelCase = 42
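    # NOTE: in the released library these two fields are
    # ``images: Union[List[PIL.Image.Image], np.ndarray]`` and
    # ``nsfw_content_detected: Optional[List[bool]]``; the obfuscation pass
    # collapsed both annotations to ``42``.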
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 42
__lowerCamelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 79 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ =get_tests_dir("fixtures/test_sentencepiece.model")
UpperCAmelCase__ ={"target_lang": "fi", "source_lang": "en"}
UpperCAmelCase__ =">>zh<<"
UpperCAmelCase__ ="Helsinki-NLP/"
if is_torch_available():
UpperCAmelCase__ ="pt"
elif is_tf_available():
UpperCAmelCase__ ="tf"
else:
UpperCAmelCase__ ="jax"
@require_sentencepiece
class lowerCamelCase__ ( _a , unittest.TestCase ):
a : Dict = MarianTokenizer
a : Optional[int] = False
a : Any = True
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
super().setUp()
__lowercase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowercase = dict(zip(A_ , range(len(A_ ) ) ) )
__lowercase = Path(self.tmpdirname )
save_json(A_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(A_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(A_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
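        # Marian keeps separate source/target SentencePiece models; the test
        # reuses the same fixture file for both sides.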
__lowercase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Dict , **A_ : Any ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **A_ )
def SCREAMING_SNAKE_CASE_ ( self : int , A_ : List[str] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = """</s>"""
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(A_ ) , 9 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
__lowercase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
__lowercase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(A_ , batch.input_ids[0] )
__lowercase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A_ )
__lowercase = [x.name for x in Path(A_ ).glob("""*""" )]
self.assertIn("""source.spm""" , A_ )
MarianTokenizer.from_pretrained(A_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = tok(
["""I am a small frog""" * 1_0_0_0, """I am a small frog"""] , padding=A_ , truncation=A_ , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=A_ , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
__lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase,  # the expected-encoding dict built above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        # This checkpoint uses separate source and target vocabularies, so the same
        # text maps to different ids depending on which side encodes it.
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single frame, a single video, or a batch of videos into a batch of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already a batch of videos (a list of lists of frames)
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video (a list of frames): wrap it in a batch
    elif is_valid_image(videos):
        return [[videos]]  # a single frame: wrap it as a one-frame video in a batch
    raise ValueError(f"Could not make batched video from {videos}")
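# Minimal sanity-check sketch for the batching helper; the demo function and the
# dummy frames are illustrative only and are not part of the original module.
def _demo_make_batched() -> None:
    import numpy as np

    frame = np.zeros((8, 8, 3), dtype=np.uint8)
    video = [frame] * 4

    assert len(make_batched(frame)) == 1 and len(make_batched(frame)[0]) == 1  # frame -> [[frame]]
    assert len(make_batched(video)) == 1 and len(make_batched(video)[0]) == 4  # video -> [video]
    assert len(make_batched([video, video])) == 2  # batch passes through unchanged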
class VideoImageProcessor(BaseImageProcessor):
    """Image processor for video models: resizes, center-crops, rescales, and normalizes frames."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame, preserving aspect ratio when only `shortest_edge` is given."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Center-crop a frame to the given height and width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize a frame with per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transformations to a single frame."""
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        return to_channel_dimension_format(image, data_format)
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a video or batch of videos into a `BatchFeature` of pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample,
                    do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale,
                    rescale_factor=rescale_factor, do_normalize=do_normalize,
                    image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
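# Minimal usage sketch. The demo helper below is illustrative only: it fabricates
# random frames and assumes the default 224x224 configuration defined above.
def _demo_video_preprocessing() -> None:
    import numpy as np

    processor = VideoImageProcessor()
    # One video = a list of frames, each an HxWxC uint8 array.
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor.preprocess(video, return_tensors="np")
    # (num_videos, num_frames, channels, height, width) -> (1, 8, 3, 224, 224)
    print(batch["pixel_values"].shape)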
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """Configuration class for the Speech2Text2 decoder."""

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # embeddings are scaled by sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
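# Minimal usage sketch (demo helper only, not part of the original module): the
# `attribute_map` above aliases generic config names onto decoder-specific ones.
def _demo_speech_to_text_2_config() -> None:
    config = Speech2Text2Config()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.decoder_attention_heads == 4
    print(config.model_type, config.vocab_size)  # speech_to_text_2 10000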