"""Marian model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging

logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    """Configuration class for Marian encoder-decoder translation models."""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
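
# Usage sketch (added; assumes the `transformers` package is installed and imports the
# public classes from the package rather than the relative modules above — the names
# mirror the definitions in this file):
#
#   >>> from transformers import MarianConfig, MarianTokenizer
#   >>> config = MarianConfig()                      # defaults above: d_model=1024, 12+12 layers
#   >>> onnx_config = MarianOnnxConfig(config)
#   >>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   >>> dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
#   >>> sorted(dummy.keys())
#   ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']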
"""MarkupLM model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    """Configuration class for MarkupLM models, including the XPath embedding settings."""

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law on any two given electrical values (voltage, current, resistance)
    and return a name/value pair for the one that was passed as zero.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
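
# A short worked example (added; the values are illustrative). Exactly one of the
# three arguments must be zero, and the function solves for that quantity:
if __name__ == "__main__":
    print(ohms_law(voltage=0, current=2.0, resistance=3.0))  # {'voltage': 6.0}
    print(ohms_law(voltage=6.0, current=0, resistance=3.0))  # {'current': 2.0}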
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if len(infix) > 7 else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
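
# Worked example (added for illustration; the conversion table that infix_2_postfix
# prints as a side effect is omitted). The infix string is reversed, converted to
# postfix, and the postfix result reversed again to give the prefix form:
#
#   >>> infix_2_prefix("a+b*c")
#   '+a*bc'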
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    "wmt21": ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for M2M100 multilingual translation models."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
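
# Usage sketch (added; assumes the `transformers` package is installed and there is
# network access to the "facebook/m2m100_418M" checkpoint referenced above):
if __name__ == "__main__":
    from transformers import M2M100Tokenizer as HubM2M100Tokenizer

    tok = HubM2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    enc = tok("Hello world!", return_tensors="pt")
    # input_ids start with the "__en__" language token and end with </s>
    print(tok.convert_ids_to_tokens(enc["input_ids"][0].tolist()))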
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # value stored in this node
        self.next: Node[T] | None = None  # link to the node below in the stack

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list: push and pop both happen at the head."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
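
# A small usage example (added for illustration):
if __name__ == "__main__":
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.pop())   # 3
    print(stack.peek())  # 2
    print(len(stack))    # 2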
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
"""The tokenizer used by the GPT-SW3 models."""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging

if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        # (assumption: the original characters did not survive extraction; this set is a
        # reconstruction using explicit escapes for Unicode space variants and
        # zero-width characters)
        self.whitespaces = {" ", "\u00a0", "\u2000", "\u2001", "\u2002", "\u2003", "\u2004", "\u2005", "\u2006", "\u2007", "\u2008", "\u2009", "\u200a", "\u200b", "\u202f", "\u205f", "\u3000", "\ufffc"}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strips non-printing characters, normalizes whitespace, and applies NFC normalization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string, handling special tokens."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes text directly with the SentencePiece model, bypassing the slow tokenizer pipeline."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids directly with the SentencePiece model."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Returns shortest paths from a vertex src to all other vertices."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
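
# Worked example (added; the edge values are illustrative). The path 0 -> 1 -> 2 costs
# 2 + 3 = 5, beating the direct 0 -> 2 edge of weight 10:
#
#   >>> graph = [
#   ...     {"src": 0, "dst": 1, "weight": 2},
#   ...     {"src": 1, "dst": 2, "weight": 3},
#   ...     {"src": 0, "dst": 2, "weight": 10},
#   ... ]
#   >>> bellman_ford(graph, vertex_count=3, edge_count=3, src=0)
#   [0.0, 2.0, 5.0]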
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler for score-based generative models."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Sets the continuous timesteps used for the diffusion chain, from 1 down to sampling_eps."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Predicts the sample at the previous timestep by reversing the SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
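
# Minimal sampling-loop sketch (added; `score_model` is a hypothetical stand-in for a
# network that predicts the score of the noisy sample `x` at continuous time `t`):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t.expand(x.shape[0]))
#       x, x_mean = scheduler.step_pred(score, x, t)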
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration class for YOLOS object-detection models."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
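
# Usage sketch (added; assumes `transformers` is installed — the defaults above match
# the YOLOS architecture referenced in the archive map):
#
#   >>> from transformers import YolosConfig, YolosForObjectDetection
#   >>> config = YolosConfig(num_detection_tokens=100)
#   >>> model = YolosForObjectDetection(config)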
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
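
# Usage sketch (added; assumes network access to the "facebook/blenderbot_small-90M"
# checkpoint referenced above):
#
#   >>> from transformers import BlenderbotSmallTokenizerFast
#   >>> tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   >>> tok("sample text")["input_ids"]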
"""Convert EnCodec checkpoints."""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the attribute path, e.g. "encoder.layers.0.conv" -> module object
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # NOTE: fixed from `model_name == "encodec_24khz" or "encodec_32khz"`, which was always truthy
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
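# Example invocation (a sketch -- the script filename and paths below are placeholders,
# not taken from this snippet; the flags match the argparse definitions above):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf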
| 634 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and lazy deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
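# A minimal usage sketch of the HashMap above (names match the class as defined here):
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap()
    hm["apple"] = 1
    hm["banana"] = 2
    assert hm["apple"] == 1 and len(hm) == 2
    del hm["apple"]
    assert list(hm) == ["banana"]  # iteration yields the remaining keys
    print(hm)  # HashMap(banana: 2)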
| 399 |
'''simple docstring'''
# NOTE: the original function name was lost in extraction; a descriptive one is used here.
def insert_uppercase_variants(txt: str) -> list[str]:
    """Return copies of ``txt`` with each alphabetic position uppercased in turn."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
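# Example (behaviour of the reconstructed function above, shown for reference):
#   insert_uppercase_variants("abc") -> ['Abc', 'aBc', 'abC']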
| 399 | 1 |
'''simple docstring'''
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text so that each sentence sits on its own line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char (fixed: the result was previously discarded)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
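# Usage sketch (needs nltk's punkt data, downloaded above when nltk is installed):
#   add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   -> "First sentence.\nSecond one."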
| 94 |
def min_path_sum(grid) -> int:
    """Return the minimum path sum from the top-left to the bottom-right cell,
    moving only right or down. The grid is updated in place.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # turn the first row into prefix sums
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 670 | 0 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
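# Note: each iteration halves the bracket, so on [1, 1000] roughly
# log2(999 / 1e-7) ~= 34 iterations are needed to reach the 1e-7 stopping tolerance.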
| 721 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/warp the image by mapping the source triangle ``pt1`` onto ``pt2``."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # NOTE: the exact point-pair choices were lost in extraction; these pairings are illustrative
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 643 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
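    # Note: _LazyModule defers the heavy torch/tokenizers imports until one of the
    # names registered in _import_structure (e.g. LlamaForCausalLM) is first accessed.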
| 270 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 270 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were mangled in extraction; the names below are
# reconstructed from diffusers' `dummy_flax_objects.py` and may not match the
# original snippet exactly.
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 702 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
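# Example invocation (a sketch -- the script filename and paths below are placeholders;
# the flags match the argparse definitions above):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf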
| 318 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # grow the query size until the index returns enough candidates
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # verify the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa2.write(images["url"] + "\n")
                    fa3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
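# Example invocation (values are placeholders):
#   python retrieve.py --class_prompt "photo of a cat" --class_data_dir ./real_reg/cat --num_class_images 200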
| 71 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Return the data rescaled to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Return the data standardized to zero mean and unit variance (z-scores)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
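# Quick sanity check (a minimal sketch using the functions above):
if __name__ == "__main__":
    assert normalization([2, 7, 10, 20, 30, 50]) == [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
    print(standardization([2, 7, 10, 20, 30, 50]))  # zero-mean, unit-variance values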
| 71 | 1 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations on a few large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 720 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 425 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath) | 125 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
expected_max_diff=1e-2,) | 44 | 0 |
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Returns a list containing all the ways the string `target` can be
    constructed by concatenating elements of `word_bank`.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
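# Illustrative trace (added note): for target "ab" and word_bank ["a", "b", "ab"],
# the table evolves as
#     table[0] = [[]]                  # "" has exactly one (empty) construction
#     table[1] = [["a"]]               # reached via word "a"
#     table[2] = [["ab"], ["b", "a"]]  # "ab" directly, or "b" appended after "a"
# and after the final reversal step, all_construct("ab", ["a", "b", "ab"])
# returns [["ab"], ["a", "b"]].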
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
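# Example (added, illustrative): instantiating a small decoder config; `hidden_size`
# resolves to `d_model` through the attribute_map above.
#
#     config = Speech2Text2Config(vocab_size=1000, d_model=128, decoder_layers=2)
#     assert config.hidden_size == 128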
| 10 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_score_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 4 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of getting exactly `successes` successes in
    `trials` independent Bernoulli trials with success probability `prob`.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
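# Worked example (added note): for the demo below, P(X = 2) with n = 4 trials and
# p = 0.75 is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.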
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
| 495 | 0 |
def check_bouncy(n: int) -> bool:
    """Return True if `n` is bouncy, i.e. its digits are neither increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
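# Examples (added note): 123 is increasing and 321 is decreasing, so neither is
# bouncy; 101 goes down then up, so check_bouncy(101) is True. This is Project
# Euler problem 112: solution(99) is the least number at which the proportion of
# bouncy numbers first reaches 99%.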
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
| 388 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
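# Usage note (added, hedged): with this conftest on the path, the transformers
# report hook can be enabled from the command line, e.g.
#
#     pytest --make-reports=my_run tests/
#
# which makes `pytest_terminal_summary_main` write per-run report files.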
| 388 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
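# Shape check (added note): with the defaults above (batch_size=3, image_size=32,
# hidden_sizes[-1]=64), BitModel's last_hidden_state comes out as (3, 64, 1, 1),
# since the encoder downsamples the 32x32 input by a factor of 32 overall,
# matching the assertion in create_and_check_model.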
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self) | 3 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8)) | 30 | 0 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
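# Worked check (added note): solution(15) == 26, since 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.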
if __name__ == "__main__":
_A = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
_A = solution(power)
print("Sum of the digits is: ", result) | 228 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
_A = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 228 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" or "midas" in checkpoint_url:  # kept as in the original script; note this condition is always truthy
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
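# Layout note (added): timm stores query/key/value as one fused `qkv` matrix of
# shape (3 * hidden_size, hidden_size), so rows [0:h] hold the query projection,
# rows [h:2h] the key projection, and rows [-h:] the value projection, with the
# fused bias sliced the same way.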
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 11 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
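# Note (added): the original BLIP-2/EVA vision attention only learns biases for the
# query and value projections; the key bias stays fixed at zero, which is why a
# zeros_like tensor is concatenated in the middle of the fused qkv bias above.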
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 472 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
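# Example (added, illustrative): the generic names resolve to the DETA-specific
# fields through the properties defined above.
#
#     config = DetaConfig()
#     assert config.hidden_size == 256          # resolves to d_model
#     assert config.num_attention_heads == 8    # resolves to encoder_attention_heads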
| 720 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit, so they are stored externally
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
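# A hedged sketch of driving the converter above from Python instead of the CLI.
# The model id and output directory below are placeholders, not values fixed by
# the script; float16 export requires a CUDA device, so fp16 stays False here.
convert_models(
    model_path="runwayml/stable-diffusion-v1-5",  # assumption: any diffusers SD checkpoint works
    output_path="./stable-diffusion-onnx",
    opset=14,
    fp16=False,
)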
| 250 | 0 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Solve the n-queens problem by depth-first search, appending every complete board to `boards`."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 44 |
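# A worked check of the diagonal formulas used by depth_first_search above, on the
# known 4-queens solution [1, 3, 0, 2] mentioned in its comments (the list holds
# the queen's column for each row): all row - col and row + col values are distinct,
# so no two queens share a 45º or 135º diagonal.
queens = [1, 3, 0, 2]
assert len(set(queens)) == 4                                    # no vertical collision
assert len({row - col for row, col in enumerate(queens)}) == 4  # no 45º collision
assert len({row + col for row, col in enumerate(queens)}) == 4  # no 135º collision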
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class __UpperCAmelCase ( _UpperCamelCase ):
def __init__( self : str , a_ : Dict , a_ : List[str]=False , a_ : Any=True , a_ : int=False , a_ : Union[str, Any]="<s>" , a_ : Optional[int]="</s>" , a_ : int="<unk>" , a_ : List[Any]="<sep>" , a_ : Dict="<pad>" , a_ : Any="<cls>" , a_ : Optional[Any]="<mask>" , a_ : int=["<eop>", "<eod>"] , a_ : Optional[Dict[str, Any]] = None , **a_ : int , ) -> None:
'''simple docstring'''
a__ : List[str] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
a__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
a__ : Union[str, Any] = 3
a__ : Dict = do_lower_case
a__ : Union[str, Any] = remove_space
a__ : int = keep_accents
a__ : str = vocab_file
a__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
a__ : Optional[int] = jieba
a__ : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Tuple = self.__dict__.copy()
a__ : Union[str, Any] = None
return state
def __setstate__( self : Tuple , a_ : int ) -> List[str]:
'''simple docstring'''
a__ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : str = {}
a__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self : List[Any] , a_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if self.remove_space:
a__ : Union[str, Any] = " ".join(inputs.strip().split() )
else:
a__ : Optional[Any] = inputs
a__ : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
a__ : Union[str, Any] = unicodedata.normalize("NFKD" , a_ )
a__ : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(a_ )] )
if self.do_lower_case:
a__ : List[Any] = outputs.lower()
return outputs
def UpperCAmelCase ( self : Any , a_ : str ) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = self.preprocess_text(a_ )
a__ : Dict = self.sp_model.encode(a_ , out_type=a_ )
a__ : Optional[Any] = []
for piece in pieces:
if len(a_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(a_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a__ : List[str] = cur_pieces[1:]
else:
a__ : List[str] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a_ )
else:
new_pieces.append(a_ )
return new_pieces
def UpperCAmelCase ( self : int , a_ : Dict ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(a_ )
def UpperCAmelCase ( self : Dict , a_ : Tuple ) -> List[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(a_ )
def UpperCAmelCase ( self : Union[str, Any] , a_ : List[Any] ) -> str:
'''simple docstring'''
a__ : Optional[Any] = "".join(a_ ).replace(a_ , " " ).strip()
return out_string
def UpperCAmelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[Any] = [self.sep_token_id]
a__ : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is not None:
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1, 1]
return ([0] * len(a_ )) + [1, 1]
def UpperCAmelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : List[str] = [self.sep_token_id]
a__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase ( self : Dict , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a__ : Optional[int] = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
a__ : int = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def UpperCAmelCase ( self : str , *a_ : Union[str, Any] , **a_ : Any ) -> int:
'''simple docstring'''
a__ : Optional[int] = super()._decode(*a_ , **a_ )
a__ : Tuple = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text | 642 | 0 |
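# A hedged usage sketch for the CPM tokenizer above. Assumptions: network access
# to the Hugging Face Hub, the TsinghuaAI/CPM-Generate repo resolving to this
# tokenizer class, and the optional jieba dependency being installed (enforced
# in __init__).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("A sample prompt")
text = tokenizer.decode(ids)  # _decode() maps "\u2582"/"\u2583" back to space/newline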
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition data into lists smaller than, equal to, and greater than the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items, or None for an invalid index."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
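# A quick worked check of quick_select, including the median case suggested by
# the comment in its body (index = len(items) // 2 for an odd-length list).
items = [7, 2, 9, 4, 1]  # sorted: [1, 2, 4, 7, 9]
assert quick_select(items, 0) == 1                # smallest element
assert quick_select(items, len(items) // 2) == 4  # median
assert quick_select(items, 99) is None            # out-of-range index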
| 708 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
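# A hedged sketch of what the lazy registration above buys, assuming this file
# lives at transformers/models/ctrl/__init__.py (an assumption): importing the
# package is cheap, and the heavy submodule import only happens on first
# attribute access.
from transformers.models import ctrl  # nothing heavy imported yet

config_cls = ctrl.CTRLConfig  # now configuration_ctrl is actually imported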
| 634 | 0 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot stores a deque; new data is pushed onto the left so the
        # most recently inserted value comes first.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key

        return super()._collision_resolution(key, data)
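# The base HashTable class is not shown here, but the chaining behavior of
# _set_value can be illustrated in isolation: each slot is a deque and
# appendleft() keeps the newest value first.
from collections import deque

slot = deque([])
slot.appendleft("first")
slot.appendleft("second")
assert list(slot) == ["second", "first"]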
| 430 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
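# A worked check: the first Fibonacci term with three digits is F(12) = 144
# (F(11) = 89 still has two digits), so fibonacci_digits_index(3) returns 12.
assert fibonacci(11) == 89
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12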
| 94 | 0 |
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
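# The integration check above can be reproduced by hand. A hedged sketch:
# this downloads microsoft/deberta-base, whose hidden size is 768.
import torch
from transformers import DebertaModel

model = DebertaModel.from_pretrained("microsoft/deberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
    output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 11, 768])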
| 79 |
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A number is a Krishnamurthy number if the sum of the factorials of its digits equals the number."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
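# A worked example: 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, while 144 is not (1! + 4! + 4! = 49).
assert krishnamurthy(145)
assert not krishnamurthy(144)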
| 79 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCAmelCase = "owlvit_text_model"
def __init__(self , _lowercase=49408 , _lowercase=512 , _lowercase=2048 , _lowercase=12 , _lowercase=8 , _lowercase=16 , _lowercase="quick_gelu" , _lowercase=1e-5 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , _lowercase=0 , _lowercase=49406 , _lowercase=49407 , **_lowercase , ):
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__a : int = vocab_size
__a : Tuple = hidden_size
__a : Any = intermediate_size
__a : List[Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Dict = max_position_embeddings
__a : Union[str, Any] = hidden_act
__a : Any = layer_norm_eps
__a : Union[str, Any] = attention_dropout
__a : Any = initializer_range
__a : int = initializer_factor
@classmethod
def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
__a : Union[str, Any] = cls.get_config_dict(_lowercase , **_lowercase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
__a : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCAmelCase = "owlvit_vision_model"
def __init__(self , _lowercase=768 , _lowercase=3072 , _lowercase=12 , _lowercase=12 , _lowercase=3 , _lowercase=768 , _lowercase=32 , _lowercase="quick_gelu" , _lowercase=1e-5 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1.0 , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase )
__a : Optional[int] = hidden_size
__a : Optional[int] = intermediate_size
__a : Union[str, Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Union[str, Any] = num_channels
__a : int = image_size
__a : Dict = patch_size
__a : Tuple = hidden_act
__a : Tuple = layer_norm_eps
__a : Optional[int] = attention_dropout
__a : str = initializer_range
__a : Union[str, Any] = initializer_factor
@classmethod
def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
__a : int = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
__a : Any = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCAmelCase = "owlvit"
_lowerCAmelCase = True
def __init__(self , _lowercase=None , _lowercase=None , _lowercase=512 , _lowercase=2.6592 , _lowercase=True , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase )
if text_config is None:
__a : Optional[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
__a : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
__a : Optional[int] = OwlViTTextConfig(**_lowercase )
__a : Optional[int] = OwlViTVisionConfig(**_lowercase )
__a : Union[str, Any] = projection_dim
__a : str = logit_scale_init_value
__a : int = return_dict
__a : List[str] = 1.0
@classmethod
def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
__a : List[str] = cls.get_config_dict(_lowercase , **_lowercase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowercase , **_lowercase )
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
__a : List[str] = {}
__a : List[str] = text_config
__a : Any = vision_config
return cls.from_dict(_lowercase , **_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = copy.deepcopy(self.__dict__ )
__a : Dict = self.text_config.to_dict()
__a : Optional[int] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 1e-4
def lowerCAmelCase__(self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = None , ):
'''simple docstring'''
__a : Any = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_lowercase , seq_length=_lowercase , framework=_lowercase )
__a : Any = super().generate_dummy_inputs(
processor.image_processor , batch_size=_lowercase , framework=_lowercase )
return {**text_input_dict, **image_input_dict}
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 14
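# A sketch of composing the full config from its two halves via
# from_text_vision_configs; the defaults are the ones shown above.
text_config = OwlViTTextConfig()      # hidden_size=512, ...
vision_config = OwlViTVisionConfig()  # patch_size=32, ...
config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
assert config.projection_dim == 512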
| 581 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Dict = checkpoints.load_tax_checkpoint(__UpperCamelCase )
snake_case_ : Tuple = flatten_dict(__UpperCamelCase )
return flax_params
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
snake_case_ : List[Any] = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
snake_case_ : Optional[Any] = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
snake_case_ : List[Any] = """.""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
snake_case_ : List[str] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
snake_case_ : Optional[int] = new_key.replace(__UpperCamelCase , __UpperCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
snake_case_ : Optional[Any] = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
snake_case_ : int = re.sub(r"""layers_(\d+)""" , r"""layer.\1""" , __UpperCamelCase )
snake_case_ : Dict = flax_dict[key]
snake_case_ : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
snake_case_ : Optional[int] = torch.from_numpy(converted_dict[key].T )
else:
snake_case_ : List[Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[str]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = get_flax_param(__UpperCamelCase )
if not use_large:
snake_case_ : Optional[int] = PixaStructVisionConfig()
snake_case_ : Optional[Any] = PixaStructTextConfig()
else:
snake_case_ : Tuple = PixaStructVisionConfig(
hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_attention_heads=2_4 , num_hidden_layers=1_8 )
snake_case_ : List[str] = PixaStructTextConfig(hidden_size=1_5_3_6 , d_ff=3_9_6_8 , num_heads=2_4 , num_layers=1_8 )
snake_case_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase )
snake_case_ : Optional[int] = PixaStructForConditionalGeneration(__UpperCamelCase )
snake_case_ : str = rename_and_convert_flax_params(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
snake_case_ : int = PixaStructImageProcessor()
snake_case_ : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
if use_large:
snake_case_ : Optional[Any] = 4_0_9_6
snake_case_ : int = True
# mkdir if needed
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
print("""Model saved in {}""".format(__UpperCamelCase ) )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
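# A hedged sketch of calling the converter above directly; the paths below are
# placeholders, and a real run needs a local T5x checkpoint plus the t5x/flax
# dependencies installed.
convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path="/path/to/t5x_checkpoint",
    pytorch_dump_folder_path="./pix2struct-hf",
    use_large=False,
)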
| 58 | 0 |
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
| 700 |
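# A hedged sketch of driving consolidate() above from Python. The generator and
# question-encoder ids are illustrative choices in the spirit of the
# facebook/rag-* checkpoints, not values fixed by the script.
from pathlib import Path

consolidate(
    model_type="rag_sequence",
    generator_name_or_path="facebook/bart-large",
    question_encoder_name_or_path="facebook/dpr-question_encoder-single-nq-base",
    dest_dir=Path("./rag-consolidated"),
)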
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
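# A quick check of random_subsample: a 5-second clip at 16 kHz is cropped to
# exactly one second, while a clip already shorter than the target is returned
# unchanged.
wav = np.zeros(5 * 16000)
assert len(random_subsample(wav, max_length=1.0)) == 16000
short = np.zeros(8000)
assert len(random_subsample(short, max_length=1.0)) == 8000  # unchanged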
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : Optional[str] = field(default=a__ , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "A file containing the training audio paths and labels."} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "A file containing the validation audio paths and labels."} )
lowercase__ : str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase__ : str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowercase__ : str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
lowercase__ : str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowercase__ : float = field(
default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class a__ :
'''simple docstring'''
lowercase__ : str = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
lowercase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase__ : Optional[str] = field(
default=a__ , metadata={"help": "Name or path of preprocessor config."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
lowercase__ : bool = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase__ : Optional[bool] = field(
default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , lowerCamelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def _snake_case ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , A , A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(A )
transformers.utils.logging.set_verbosity(A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
lowerCAmelCase__ = DatasetDict()
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
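
# Hedged usage note (the model and dataset names below are illustrative, not from the source):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-ks --do_train --do_eval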
if __name__ == "__main__":
main() | 98 | 0 |
'''simple docstring'''
def prefix_function(input_string: str) -> list[int]:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
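
    # Hedged usage sketch (the sample string and expected values are
    # illustrative additions, not from the original tests):
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4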
| 286 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """simple docstring"""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
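
    # Hedged usage sketch (this tiny tree is an illustrative addition):
    #       2.0
    #      /   \
    #    1.0   3.0
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))))  # True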
| 480 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)

    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
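
    # Hedged usage sketch (this 2x2 system is an illustrative addition);
    # it converges toward x1 = 1/11, x2 = 7/11:
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 25))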
| 711 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 482 | 0 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr: list[int] = None, size: int = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
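
    # Hedged usage sketch (the sample array is an illustrative addition):
    f = FenwickTree([1, 2, 3, 4, 5])
    print(f.prefix(3))    # 1 + 2 + 3 = 6
    f.add(1, 10)
    print(f.query(1, 3))  # (2 + 10) + 3 = 15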
| 554 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """simple docstring"""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(R"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
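

if __name__ == "__main__":
    # Hedged usage sketch (the strings below are illustrative additions):
    print(normalize_answer("The Cat, sat!"))          # "cat sat"
    print(f1_score("the cat sat", "a cat sat down"))  # 0.8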
| 180 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 585 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', f'''ffn.experts.expert_{expert_idx}''')
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['model']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt')['model']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', f'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin''')
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
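
    # Hedged usage note (script name and paths are illustrative, not from the source):
    #   python convert_nllb_moe_checkpoint_to_pytorch.py \
    #       --nllb_moe_checkpoint_path /path/to/checkpoint_2_300000 \
    #       --pytorch_dump_folder_path /path/to/output --dtype float32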
| 585 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str, base_model: bool) -> None:
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
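
    # Hedged usage note (file names are illustrative, not from the source):
    #   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path model.ckpt --config_file config.json \
    #       --pytorch_dump_path pytorch_model.bin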
| 510 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
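
    # Hedged usage sketch (sample series are illustrative additions):
    assert is_arithmetic_series([2, 4, 6]) is True
    assert is_arithmetic_series([3, 6, 12, 24]) is False
    assert arithmetic_mean([2, 4, 6]) == 4.0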
| 510 | 1 |
"""simple docstring"""
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a flat array."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # return the front element without removing it (False if empty)
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp | 621 |
"""simple docstring"""
def bfs(graph, s, t, parent) -> bool:
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
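

# Hedged usage sketch (this 3-node chain is an illustrative addition):
# 0 -> 1 has capacity 5 and 1 -> 2 has capacity 3, so max flow from 0 to 2 is 3.
assert ford_fulkerson([[0, 5, 0], [0, 0, 3], [0, 0, 0]], 0, 2) == 3
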
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
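

# Hedged usage sketch (the overridden sizes are illustrative additions):
if __name__ == "__main__":
    config = ConvBertConfig(hidden_size=256, num_hidden_layers=4)
    print(config.model_type, config.hidden_size, config.num_hidden_layers)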
| 476 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 476 | 1 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 716 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 626 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """

        run = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """
        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """
        run = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 673 |
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
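
    # Hedged usage sketch (sample values are illustrative additions):
    print(perfect_square(16))                # True
    print(perfect_square_binary_search(17))  # False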
| 673 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'bert-generation'

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
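

# Hedged usage sketch (the overridden sizes are illustrative additions):
if __name__ == "__main__":
    config = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
    print(config.model_type, config.hidden_size)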
| 474 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    """simple docstring"""

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 474 | 1 |
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    dp = [[0 for cell in row] for row in grid]
    n = len(grid[0])

    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(F"{solution() = }")
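
    # Hedged worked example (illustrative addition): for the 2x2 grid
    #     1,2
    #     3,4
    # the minimal right/down path sum is 1 + 2 + 4 = 7.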
| 361 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a WavLM model."""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
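# --- Editor's illustration (not part of the original module) ---
# Minimal usage sketch: the default conv strides (5, 2, 2, 2, 2, 2, 2)
# multiply to 320, so each encoder frame covers 320 raw audio samples.
def _demo_inputs_to_logits_ratio() -> None:
    config = WavLMConfig()
    assert config.inputs_to_logits_ratio == 320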
| 654 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
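# --- Editor's illustration (not part of the original test file) ---
# Standalone usage sketch of the pipeline exercised above; it downloads
# pretrained weights, so it is left commented out:
#
#     pipe = KarrasVePipeline(
#         unet=UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256"),
#         scheduler=KarrasVeScheduler(),
#     )
#     image = pipe(num_inference_steps=20).images[0]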
| 710 | """simple docstring"""
import heapq
import sys
import numpy as np
SCREAMING_SNAKE_CASE__:Optional[int] = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
        temp = []
        (pro, x) = heapq.heappop(self.elements)
        while x != item:
            temp.append((pro, x))
            (pro, x) = heapq.heappop(self.elements)
        for prito, yyy in temp:
            heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos) -> float:
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos) -> int:
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer: dict, goal: TPos, start: TPos) -> None:
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
for i in range(a ):
for j in range(a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))


def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
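# --- Editor's illustration (not part of the original module) ---
# The control rule above is Shared Multi-Heuristic A*: an inadmissible queue
# i is only expanded while its best key stays within a factor W2 of the
# anchor queue's best key, which bounds the returned path cost by roughly
# W1 * W2 times the optimum.
def _should_expand_inadmissible(min_inad: float, min_anchor: float) -> bool:
    return min_inad <= W2 * min_anchor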
| 67 | 0 |
"""Evaluate a space-separated postfix (Reverse Polish) expression, printing
every stack operation as a table."""
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print('\n\tResult = ', solve(Postfix))
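# --- Editor's illustration (not part of the original module) ---
# Non-interactive usage sketch: "5 6 9 * +" evaluates to 5 + (6 * 9) = 59.
def _demo_solve() -> None:
    assert solve("5 6 9 * +".split(" ")) == 59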
| 404 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 404 | 1 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
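# --- Editor's illustration (not part of the original module) ---
# Usage sketch: every conversion is a power-of-ten scaling between exponents.
def _demo_length_conversion() -> None:
    assert length_conversion(4, "kilometer", "meter") == 4000.0
    assert length_conversion(1, "meter", "km") == 0.001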
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
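# --- Editor's illustration (not part of the original module) ---
# The TYPE_CHECKING/_LazyModule pattern above keeps `import transformers`
# cheap: heavy submodules are only imported when one of their attributes is
# first accessed. A minimal standalone sketch of the same idea:
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported attribute to the submodule that defines it
#             self._mapping = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._mapping[attr], self.__name__)
#             return getattr(module, attr)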
| 677 | 1 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes (products of exactly two primes, not necessarily
    distinct) below max_number with a two-pointer sweep over the primes
    below max_number // 2."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
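# --- Editor's illustration (not part of the original solution) ---
# Cross-check on a small bound: the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -> 10 of them.
def _demo_solution() -> None:
    assert solution(30) == 10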
| 55 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
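# --- Editor's illustration (not part of the original module) ---
# Usage sketch (needs network access to fetch the vocab on first run):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("first sentence", "second sentence")
#     # enc["token_type_ids"] marks the first segment with 0s and the second
#     # with 1s, matching create_token_type_ids_from_sequences above.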
| 252 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 719 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 195 | 0 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right of ``grid``
    that move one step up/down/left/right at a time, never revisit a cell, and
    never enter a cell containing 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
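# --- Editor's illustration (not part of the original module) ---
# Usage sketch: a fully open 2x2 grid has exactly two simple paths from the
# top-left to the bottom-right corner.
def _demo_depth_first_search() -> None:
    assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2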
| 637 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # MODEL_FOR_PRETRAINING_MAPPING and get_values are assumed to be imported
            # with the other test utilities above this excerpt.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 382 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def _UpperCamelCase( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def _UpperCamelCase( self : int ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def _UpperCamelCase( self : int ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _UpperCamelCase( self : Optional[Any] ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _UpperCamelCase( self : int ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def _UpperCamelCase( self : str ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase( self : Optional[Any] ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _UpperCamelCase( self : List[str] ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _UpperCamelCase( self : str ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase( self : Union[str, Any] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase( self : Optional[Any] ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def _UpperCamelCase( self : List[str] ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def _UpperCamelCase( self : Tuple ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def _UpperCamelCase( self : str ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase( self : str ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 701 |
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_string = str(abs(num))
        # One copy of the digit list per position, so each copy can drop one digit.
        num_transpositions = [list(num_string) for _ in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(transposition)) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
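# Worked example (comments only), traced against the fixed function above:
# >>> remove_digit(152)
# 52
# Dropping each digit in turn yields 52, 12 and 15, and 52 is the largest.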
| 151 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
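# A minimal usage sketch (hedged; `ToyModule` is hypothetical, not part of any library).
# Decorating `forward` makes an attached accelerate hook run `pre_forward` first, e.g.
# to move offloaded weights back onto the device before the real forward executes:
#
# import torch
#
# class ToyModule(torch.nn.Module):
#     @apply_forward_hook
#     def forward(self, x):
#         return 2 * x
#
# m = ToyModule()
# m(torch.ones(3))  # runs m._hf_hook.pre_forward(m) first, if such a hook is attached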
| 33 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
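# Illustration (comments only; the exact package path is an assumption): with the lazy
# module installed in sys.modules, importing a symbol triggers the real import only on
# first access, so torch-heavy modeling code is not loaded eagerly:
#
# from transformers.models.deprecated.mctct import MCTCTConfig  # resolved lazily
# from transformers.models.deprecated.mctct import MCTCTModel   # needs torch at access time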
| 197 | 0 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
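# Worked micro-example (comments only), traced against the function above:
# >>> longest_subsequence([4, 1, 2, 3])
# [1, 2, 3]
# The pivot 4 is discarded because the tail [1, 2, 3] yields a longer non-decreasing run.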
| 49 | import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # zero out one corner (slice target assumed; it was lost in the dump)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # slice target assumed; it was lost in the dump
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 1 | import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """simple docstring"""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
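# Worked micro-example (comments only): for the diagonal matrix below the dominant
# eigenvalue is 2, so power iteration converges to ~2.0 with eigenvector ~[1, 0]:
# >>> power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))[0]
# 2.0  (up to error_tol)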
| 398 | 0 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''') | 709 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
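# Worked micro-example (comments only), traced against the function above:
# >>> all_construct("ab", ["a", "b", "ab"])
# [['ab'], ['a', 'b']]
# table[1] holds [['a']] after i=0, and table[2] collects both ways to build "ab".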
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 649 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_validation():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 71 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 71 | 1 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
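# Smaller example (comments only), assuming the function above:
# >>> print(multiplication_table(number=3, number_of_terms=2))
# 3 * 1 = 3
# 3 * 2 = 6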
| 706 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCamelCase__ = CLIPImageProcessor()
UpperCamelCase__ = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
UpperCamelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 548 | 0 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of an undirected weighted graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
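# A minimal usage sketch, assuming the classes/functions above (1-indexed node labels):
# >>> G = [Vertex(n) for n in range(5)]
# >>> connect(G, 1, 2, 15); connect(G, 1, 3, 12); connect(G, 2, 4, 13)
# >>> connect(G, 3, 4, 11); connect(G, 1, 5, 20)
# >>> prim(G, G[0])
# [(2, 4), (3, 1), (4, 3), (5, 1)]   # MST edges as (node, parent) pairs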
| 38 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _snake_case ( lowercase__):
UpperCamelCase__ : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : int ="""ml.p3.2xlarge"""
UpperCamelCase__ : Any ="""accelerate_sagemaker_execution_role"""
UpperCamelCase__ : Dict ="""hf-sm"""
UpperCamelCase__ : Optional[int] ="""us-east-1"""
UpperCamelCase__ : Optional[Any] =1
UpperCamelCase__ : int ="""accelerate-sagemaker-1"""
UpperCamelCase__ : Union[str, Any] ="""1.6"""
UpperCamelCase__ : str ="""4.4"""
UpperCamelCase__ : str ="""train.py"""
UpperCamelCase__ : Union[str, Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
UpperCamelCase__ : str =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class _snake_case ( unittest.TestCase):
def A__ ( self : Optional[int] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
lowercase__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"], __lowercase )
assert isinstance(converted_args["do_train"], __lowercase )
assert isinstance(converted_args["epochs"], __lowercase )
assert isinstance(converted_args["learning_rate"], __lowercase )
assert isinstance(converted_args["max_steps"], __lowercase )
with pytest.raises(__lowercase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 413 | 0 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation)
    ).replace('\n', '')
    tokenize_document = document_without_punctuation.split(' ')  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n')
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.')
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError('df must be > 0')
    elif n == 0:
        raise ValueError('log10(0) is undefined.')
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    return round(tf * idf, 3)
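# A minimal usage sketch (comments only), assuming the functions above:
# >>> term_frequency("to", "To be, or not to be")
# 2
# >>> corpus = "this is the first document\nthis is the second document"
# >>> document_frequency("first", corpus)
# (1, 2)
# >>> inverse_document_frequency(1, 2)
# 0.301
# >>> tf_idf(2, 0.301)
# 0.602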
| 46 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(__UpperCamelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('inf' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(__UpperCamelCase , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-12 )
tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase , __snake_case ):
"""simple docstring"""
if is_tf_available():
__A = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 2
snake_case_ = 2
class SCREAMING_SNAKE_CASE ( tf.Module ):
"""simple docstring"""
def __init__( self , __UpperCamelCase ):
"""simple docstring"""
super(__UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=__UpperCamelCase , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.model.generate(
input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , max_new_tokens=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_02, 1_03]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={'serving_default': dummy_model.serving} )
snake_case_ = tf.saved_model.load(__UpperCamelCase ).signatures['serving_default']
for batch_size in range(1 , len(__UpperCamelCase ) + 1 ):
snake_case_ = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**__UpperCamelCase )['sequences']
snake_case_ = test_model.generate(**__UpperCamelCase , max_new_tokens=__UpperCamelCase )
tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 1
snake_case_ = 2
class SCREAMING_SNAKE_CASE ( tf.Module ):
"""simple docstring"""
def __init__( self , __UpperCamelCase ):
"""simple docstring"""
super(__UpperCamelCase , self ).__init__()
snake_case_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=__UpperCamelCase , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.model.generate(
input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , max_new_tokens=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_02, 1_03]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={'serving_default': dummy_model.serving} )
snake_case_ = tf.saved_model.load(__UpperCamelCase ).signatures['serving_default']
for input_row in range(len(__UpperCamelCase ) ):
snake_case_ = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**__UpperCamelCase )['sequences']
snake_case_ = test_model.generate(**__UpperCamelCase , max_new_tokens=__UpperCamelCase )
tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase )
@slow
@require_tensorflow_text
def __lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=__UpperCamelCase )
class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
super().__init__()
snake_case_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__UpperCamelCase , 'spiece.model' ) , 'rb' ).read() )
snake_case_ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def __lowerCAmelCase ( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.tokenizer.tokenize(__UpperCamelCase )
snake_case_ , snake_case_ = text.pad_model_inputs(
__UpperCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
return self.tokenizer.detokenize(__UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
snake_case_ = complete_model(__UpperCamelCase )
snake_case_ = tf.keras.Model(__UpperCamelCase , __UpperCamelCase )
keras_model.save(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
snake_case_ = 14
snake_case_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 'Hello, my dog is cute and'
snake_case_ = tokenizer(__UpperCamelCase , return_tensors='tf' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_38, 1_98]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def test_model_kwarg_encoder_signature_filtering(self):
    """simple docstring"""
    bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
    article = 'Hugging Face is a technology company based in New York and Paris.'
    input_ids = bart_tokenizer(article, return_tensors='tf').input_ids
    bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
    output = bart_model.generate(input_ids).numpy()

    class FakeBart(TFBartForConditionalGeneration):
        def call(self, input_ids, foo=None, **kwargs):
            return super().call(input_ids, **kwargs)

    bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
    fake_output = bart_model.generate(input_ids, foo='bar').numpy()
    self.assertTrue(np.array_equal(output, fake_output))

    class FakeEncoder(bart_model.model.encoder.__class__):
        def call(self, input_ids, **kwargs):
            return super().call(input_ids, **kwargs)

    fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
    bart_model.model.encoder = fake_encoder

    # Normal generation still works (the output will be different because the encoder weights are different)
    fake_output = bart_model.generate(input_ids).numpy()
    with self.assertRaises(ValueError):
        # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
        bart_model.generate(input_ids, foo='bar')
| 46 | 1 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
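    # A quick sanity check with assumed values: 100 VA of apparent power at a
    # 0.9 power factor splits into 90 W of real power and ~43.59 VAR of reactive power.
    print(real_power(100, 0.9))      # 90.0
    print(reactive_power(100, 0.9))  # 43.58898943540673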
| 323 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
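# Minimal usage sketch (assumes network access to the Hub; the agent name is arbitrary):
# prompt_template = download_prompt(None, agent_name="MyAgent", mode="run")
# print(prompt_template[:80])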
| 323 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
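# A quick illustration of the tolerance used above (assumed setup): comparing a
# model against itself is equal by construction, while two independently
# initialised models would almost surely exceed the 1e-4 threshold.
# check_models_equal(model, model)  # -> True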
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 670 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f'{self.data}'

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
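    # Usage sketch of the list above (assumed values):
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    print(linked_list)        # 1 2 3
    linked_list.delete_value(2)
    print(linked_list)        # 1 3
    print(2 in linked_list)   # False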
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 635 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
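# For instance (toy values): rename_key({"a": 1}, "a", "b") leaves the dict as {"b": 1}.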
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'Model name {model_name} not supported')

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
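# Example invocation (hypothetical script and path names):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub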
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 635 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowerCamelCase = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.')
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits)
        sorted_docs = sorted(range(n_docs), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'Span is too long: {length} > {max_answer_length}')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
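# Minimal usage sketch (assumes Hub access to the pretrained reader tokenizer):
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions=["What colour is the sky?"],
#     titles=["Sky"],
#     texts=["The sky appears blue because of Rayleigh scattering."],
#     return_tensors="pt",
# )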
| 207 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
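# For example (using the blinker defined above), a vertical bar of three live
# cells flips to a horizontal bar after one step — a quick sanity check of the rules:
# new_generation(BLINKER) -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]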
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:]) | 45 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_A : Optional[int] = object()
# For specifying empty leaf dict `{}`
_A : Tuple = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Windows of len(qts) in ks, checking for all match.
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
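# Minimal usage sketch (hypothetical GPT-J-style parameter tree):
# import jax.numpy as jnp
# params = {"transformer": {"wte": {"embedding": jnp.zeros((50257, 4096))}}}
# specs = set_partitions(params)  # -> frozen dict mapping each leaf to a PartitionSpec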
| 100 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 489 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
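# e.g. to_atuple(7) -> (7, 7), while to_atuple((3, 4)) is returned unchanged.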
@require_tf
class lowerCAmelCase_ :
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert")
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
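# --- Editor's note: illustrative usage sketch, not part of the original test
# file. It shows how the dual encoder exercised above is typically used for
# zero-shot image/text matching; the checkpoint name is taken from the test,
# "cat.png" is a placeholder path, and from_pt=True assumes torch is installed.
from PIL import Image
from transformers import TFVisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", from_pt=True)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

image = Image.open("cat.png")  # any RGB image
inputs = processor(
    text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")
outputs = model(**inputs)
# logits_per_image[i, j] is the scaled similarity between image i and text j
print(outputs.logits_per_image)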
| 489 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'''image.shape {image.shape}''')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='',
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
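# --- Editor's note: illustrative sketch, not part of the original test file.
# Kandinsky 2.1 inpainting is a two-stage pipeline, as exercised above: a
# prior maps the prompt to CLIP image embeddings, then the inpaint pipeline
# denoises conditioned on those embeddings, the source image, and the mask.
# Assumes a CUDA device; the mask convention follows the test above.
import numpy as np
import torch
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # region to repaint
image_emb, zero_image_emb = prior("a hat", negative_prompt="").to_tuple()
image = pipe(
    "a hat", image=init_image, mask_image=mask, image_embeds=image_emb,
    negative_image_embeds=zero_image_emb, height=768, width=768).images[0]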
| 9 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 407 | 0 |
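# --- Editor's note: brief self-contained usage sketch, not part of the
# original file. With @lru_cache each factorial(k) is computed once and every
# later call down the recursion reuses the cached value, so repeated calls
# cost a single dict lookup.
import math
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


for n in range(10):
    assert factorial(n) == math.factorial(n)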
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
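# --- Editor's note: illustrative sketch, not part of the original file. The
# same Project Euler 48 digits can be computed without huge intermediate
# integers by taking each self-power modulo 10**10 (three-argument pow):
MOD = 10**10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(last_ten).zfill(10))  # should match solution() above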
| 269 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
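# --- Editor's note: illustrative sketch, not part of the original file. A
# concrete command subclasses the ABC above: register_subcommand wires an
# argparse sub-parser to a factory, and run() does the work. The names here
# (EnvCommand, "env") are hypothetical.
from argparse import ArgumentParser


class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is the sub-parsers action returned by add_subparsers()
        env_parser = parser.add_parser("env", help="print environment info")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info would be printed here")


cli = ArgumentParser("cli")
EnvCommand.register_subcommand(cli.add_subparsers())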
| 269 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("""Something there""")
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there"""))
        outputs = generator(["""This is great !""", """Something else"""], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )
        outputs = generator(
            ["""This is great !""", """Something else"""], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )
        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""pt""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])
        num_return_sequences = 3
        outputs = generator(
            """Something there""", num_return_sequences=num_return_sequences, num_beams=num_return_sequences)
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs, target_outputs)
        outputs = generator("""This is a test""", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"""generated_token_ids""": ANY(torch.Tensor)},
                {"""generated_token_ids""": ANY(torch.Tensor)},
            ],
        )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""tf""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])
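# --- Editor's note: illustrative usage sketch, not part of the original test
# file. The pipeline under test is used like this; the tiny checkpoint name is
# taken from the tests above and produces toy output.
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("translate English to German: Hello", do_sample=False))
# -> a list like [{'generated_text': '...'}]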
| 88 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/accelerate''')
    open_issues = repo.get_issues(state='''open''')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 521 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    '''simple docstring'''
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
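# --- Editor's note: illustrative sketch, not part of the original file. The
# decorator above is meant for methods that touch model weights directly, so
# any accelerate offloading hook attached to the module can move parameters
# onto the right device first. The class and method below are hypothetical.
class TinyModel:
    @apply_forward_hook
    def encode(self, x):
        # would normally run layers whose weights may be offloaded
        return x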
| 706 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        """simple docstring"""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors='tf', padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
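# --- Editor's note: illustrative usage sketch, not part of the original test
# file. TFBertTokenizer runs entirely inside the TF graph (it requires the
# tensorflow_text package), so tokenization can ship inside a SavedModel with
# no Python-side preprocessing:
import tensorflow as tf
from transformers import TFBertTokenizer

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
batch = tf.constant(["TF-native tokenization!", "No Python preprocessing needed."])
print(tf_tokenizer(batch)["input_ids"])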
| 648 | 0 |
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
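# --- Editor's note: illustrative cross-check, not part of the original file
# (assumes the catalan definition above). With this 1-indexed convention,
# catalan(n) is C(n-1) in the usual 0-indexed Catalan sequence, so it can be
# verified against the closed form C(k) = binom(2k, k) / (k + 1):
import math

for n in range(1, 10):
    k = n - 1
    assert catalan(n) == math.comb(2 * k, k) // (k + 1)
# e.g. catalan(5) == 14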
| 63 |
from math import log2


def get_index_of_rightmost_set_bit(a: int) -> int:
    """simple docstring"""
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
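# --- Editor's note: illustrative note, not part of the original file. In
# two's complement, a & -a isolates the lowest set bit, and log2 of that power
# of two is its index:
n = 0b10100  # 20
assert n & -n == 0b100  # lowest set bit sits at index 2
assert (n & -n).bit_length() - 1 == 2  # same index without floating-point log2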
| 290 | 0 |
"""simple docstring"""
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert_char = self.__min_dist_top_down_dp(m, n - 1)
                delete_char = self.__min_dist_top_down_dp(m - 1, n)
                replace_char = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert_char, delete_char, replace_char)
        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_char = self.dp[i][j - 1]
                    delete_char = self.dp[i - 1][j]
                    replace_char = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_char, delete_char, replace_char)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()

    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()

    S1 = input("""Enter the first string: """).strip()
    S2 = input("""Enter the second string: """).strip()

    print()
    print(f'The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}')
    print(f'The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}')
    print()
    print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 720 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
| 309 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'xlm-roberta-xl'

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
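# --- Editor's note: illustrative usage sketch, not part of the original file.
# The config class above is normally obtained through the Auto API; the
# checkpoint name comes from the archive map at the top of this file:
from transformers import AutoConfig

config = AutoConfig.from_pretrained("facebook/xlm-roberta-xl")
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # xlm-roberta-xl 2560 36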
| 372 |
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
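# --- Editor's note: illustrative cross-check, not part of the original file.
# The same Project Euler 19 count falls out of the standard library directly:
from datetime import date

sundays = sum(
    1 for year in range(1901, 2001) for month in range(1, 13) if date(year, month, 1).weekday() == 6
)
print(sundays)  # 171, matching solution() above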
| 471 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 717 |
from collections import Counter
from timeit import timeit
def __UpperCamelCase ( _lowerCAmelCase = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __UpperCamelCase ( _lowerCAmelCase = "" ):
"""simple docstring"""
if len(_lowerCAmelCase ) == 0:
return True
UpperCAmelCase = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCAmelCase = {}
for character in lower_case_input_str:
UpperCAmelCase = character_freq_dict.get(_lowerCAmelCase , 0 ) + 1
UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __UpperCamelCase ( _lowerCAmelCase = "" ):
"""simple docstring"""
print("\nFor string = " , _lowerCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 405 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
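# --- Editor's note: illustrative usage sketch, not part of the original test
# file. The same Csv builder backs the public API, where features can be
# attached when loading; "my_file.csv" is a placeholder path.
from datasets import ClassLabel, Features, Value, load_dataset

features = Features({"label": ClassLabel(names=["good", "bad"]), "text": Value("string")})
ds = load_dataset("csv", data_files="my_file.csv", features=features)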
| 300 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, count):
    parts = s.rsplit(old, count)
    return new.join(parts)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 300 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_a : str= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : Tuple= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_a : str= "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : Tuple= (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_a : Optional[int]= "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : List[str]= (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_a : str= "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : Optional[int]= "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_a : int= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : str= "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_a : int= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_a : Optional[Any]= "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_a : Dict= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_a : Optional[Any]= "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_a : int= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_a : Optional[Any]= "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_a : Optional[int]= "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : Any= "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_a : Tuple= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_a : List[str]= "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_a : List[Any]= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : Tuple= "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_a : Optional[Any]= ""
_a : Tuple= "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_a : Union[str, Any]= "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_a : List[Any]= "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
# `example_yaml_structure` below is the module-level YAML schema defined earlier in this file.
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
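# Added illustration (not part of the original test module): outside of pytest the
# same API is driven as below; `example_yaml_structure` is the module-level schema
# that the tests above rely on.
def _demo_readme_validation() -> None:
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.to_dict()   # nested {name, text, is_empty_text, subsections} mapping
    readme.validate()  # raises ValueError when the card is malformed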
| 192 | 0 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
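# Added usage sketch (not part of the original file): on the classic example,
# a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass
# on day 20 is optimal (2 + 7 + 2 = 11).
def _demo_mincost_tickets() -> None:
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11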
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Optional[int] = list(snake_case__ )
A_ : List[Any] = list(snake_case__ )
A_ : List[str] = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count += 1
A_ : Optional[int] = """_"""
if count > 1:
return False
else:
return "".join(snake_case__ )
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = []
while True:
A_ : Any = ["""$"""] * len(snake_case__ )
A_ : int = []
for i in range(len(snake_case__ ) ):
for j in range(i + 1 , len(snake_case__ ) ):
A_ : int = compare_string(binary[i] , binary[j] )
if k is False:
A_ : Optional[int] = """*"""
A_ : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case__ ) == 0:
return pi
A_ : Tuple = list(set(snake_case__ ) )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : str = []
for minterm in minterms:
A_ : Any = """"""
for _ in range(snake_case__ ):
A_ : int = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case__ )
return temp
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : str = list(snake_case__ )
A_ : List[Any] = list(snake_case__ )
A_ : Optional[Any] = 0
for i in range(len(snake_case__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : str = []
A_ : Tuple = [0] * len(snake_case__ )
for i in range(len(chart[0] ) ):
A_ : Optional[int] = 0
A_ : List[str] = -1
for j in range(len(snake_case__ ) ):
if chart[j][i] == 1:
count += 1
A_ : Union[str, Any] = j
if count == 1:
A_ : str = 1
for i in range(len(snake_case__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case__ ) ):
A_ : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
A_ : List[str] = 0
A_ : List[str] = -1
A_ : Union[str, Any] = 0
for i in range(len(snake_case__ ) ):
A_ : List[str] = chart[i].count(1 )
if count_n > max_n:
A_ : Union[str, Any] = count_n
A_ : Optional[int] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case__ ) ):
A_ : Optional[Any] = 0
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Optional[int] = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )]
for i in range(len(snake_case__ ) ):
A_ : List[str] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ):
A_ : Union[str, Any] = 1
return chart
def __UpperCamelCase ( ):
A_ : Union[str, Any] = int(input("""Enter the no. of variables\n""" ) )
A_ : List[Any] = [
float(snake_case__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
A_ : List[str] = decimal_to_binary(snake_case__ , snake_case__ )
A_ : str = check(snake_case__ )
print("""Prime Implicants are:""" )
print(snake_case__ )
A_ : List[str] = prime_implicant_chart(snake_case__ , snake_case__ )
A_ : int = selection(snake_case__ , snake_case__ )
print("""Essential Prime Implicants are:""" )
print(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
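# Added usage sketch (not part of the original file): minimization of
# f(a, b, c) = sum of minterms (0, 1, 2, 5, 6, 7), driven without `main()`'s input().
def _demo_quine_mc_cluskey() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5, 6, 7])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    print("Essential prime implicants:", selection(chart, prime_implicants))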
| 180 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowerCAmelCase_ : Union[str, Any] = 'A painting of a squirrel eating a burger'
lowerCAmelCase_ : Tuple = jax.device_count()
lowerCAmelCase_ : Tuple = num_samples * [prompt]
lowerCAmelCase_ : List[Any] = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : Tuple = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
lowerCAmelCase_ : Dict = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=2_5 , jit=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowerCAmelCase_ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ : Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase_ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Optional[Any] = 'stabilityai/stable-diffusion-2'
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='scheduler' )
lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , revision='bf16' , dtype=jnp.bfloataa , )
lowerCAmelCase_ : Tuple = scheduler_params
lowerCAmelCase_ : Tuple = 'A painting of a squirrel eating a burger'
lowerCAmelCase_ : Union[str, Any] = jax.device_count()
lowerCAmelCase_ : Union[str, Any] = num_samples * [prompt]
lowerCAmelCase_ : List[str] = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = replicate(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = shard(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = jax.random.PRNGKey(0 )
lowerCAmelCase_ : List[str] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
lowerCAmelCase_ : Optional[int] = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=2_5 , jit=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowerCAmelCase_ : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ : Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase_ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ : Optional[int] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
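# Added sketch (not part of the tests above): the replicate/shard pattern the
# tests rely on, shown on a toy array. `replicate` copies parameters to every
# local device; `shard` splits a batch along a new leading device axis.
def _demo_shard() -> None:
    import numpy as np

    batch = np.zeros((jax.local_device_count() * 2, 4), dtype=np.float32)
    sharded = shard(batch)
    assert sharded.shape == (jax.local_device_count(), 2, 4)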
| 317 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : List[str] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
lowerCAmelCase_ : Any = do_resize
lowerCAmelCase_ : Dict = do_rescale
lowerCAmelCase_ : int = do_normalize
lowerCAmelCase_ : int = do_center_crop
lowerCAmelCase_ : Any = crop_size
lowerCAmelCase_ : Any = size
lowerCAmelCase_ : str = resample
lowerCAmelCase_ : Any = rescale_factor
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase_ : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "shortest_edge" in size:
lowerCAmelCase_ : List[str] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowerCAmelCase_ : List[str] = (size['height'], size['width'])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any , ):
lowerCAmelCase_ : str = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : List[str] , ):
lowerCAmelCase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = resample if resample is not None else self.resample
lowerCAmelCase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : Dict = size if size is not None else self.size
lowerCAmelCase_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if not is_batched(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : str = [images]
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowerCAmelCase_ : int = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
lowerCAmelCase_ : List[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowerCAmelCase_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowerCAmelCase_ : Tuple = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase_ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
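# Added usage sketch (not part of the original module; not runnable from inside
# the transformers source tree without adjusting the relative imports). With the
# defaults above, an HWC uint8 image becomes a CHW float batch:
#
#   processor = SimpleImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)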
| 317 | 1 |
from math import pow


def backtrack(needed_sum: int, power: int, current_number: int, current_sum: int, solutions_count: int) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
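# Added usage sketch (not part of the original file): 13 can be written as a
# sum of distinct squares in exactly one way (4 + 9), so solve(13, 2) == 1.
def _demo_solve() -> None:
    assert solve(13, 2) == 1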
| 101 |
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
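# Added usage sketch (not part of the original file): 25 is 0b11001 and 32 is
# 0b100000; no bit positions overlap, so the AND is all zeros.
def _demo_binary_and() -> None:
    assert binary_and(25, 32) == "0b000000"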
if __name__ == "__main__":
import doctest
doctest.testmod() | 141 | 0 |
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1

    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 451 |
| 451 | 1 |
"""simple docstring"""
import os
def _lowerCamelCase ( __a = "matrix.txt" ):
with open(os.path.join(os.path.dirname(__a ), __a ) ) as in_file:
SCREAMING_SNAKE_CASE_ = in_file.read()
SCREAMING_SNAKE_CASE_ = [[int(__a ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE_ = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE_ = len(grid[0] )
SCREAMING_SNAKE_CASE_ = [[0 for i in range(__a )] for j in range(__a )]
SCREAMING_SNAKE_CASE_ = grid[0][0]
for i in range(1, __a ):
SCREAMING_SNAKE_CASE_ = grid[0][i] + dp[0][i - 1]
for i in range(1, __a ):
SCREAMING_SNAKE_CASE_ = grid[i][0] + dp[i - 1][0]
for i in range(1, __a ):
for j in range(1, __a ):
SCREAMING_SNAKE_CASE_ = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1] )
return dp[-1][-1]
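# Added check (not part of the original file): the same DP on an inline 2x2
# grid; the cheapest path through [[1, 3], [2, 4]] is 1 -> 2 -> 4 = 7.
def _demo_min_path_sum() -> None:
    grid = [[1, 3], [2, 4]]
    dp = [[0, 0], [0, 0]]
    dp[0][0] = grid[0][0]
    dp[0][1] = grid[0][0] + grid[0][1]
    dp[1][0] = grid[0][0] + grid[1][0]
    dp[1][1] = grid[1][1] + min(dp[0][1], dp[1][0])
    assert dp[1][1] == 7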
if __name__ == "__main__":
print(f'''{solution() = }''') | 626 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = BloomTokenizerFast
UpperCAmelCase__ = BloomTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = '''tokenizer_file'''
UpperCAmelCase__ = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
SCREAMING_SNAKE_CASE_ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
SCREAMING_SNAKE_CASE_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ )['''input_ids''']
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
SCREAMING_SNAKE_CASE_ = '''This is a simple input'''
SCREAMING_SNAKE_CASE_ = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE_ = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE_ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
SCREAMING_SNAKE_CASE_ = None # Hotfixing padding = None
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = next(iter(SCREAMING_SNAKE_CASE_ ) )['''premise'''] # pick up one data
SCREAMING_SNAKE_CASE_ = list(sample_data.values() )
SCREAMING_SNAKE_CASE_ = list(map(tokenizer.encode , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for x in output_tokens]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 ) | 626 | 1 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05,
        #     num_attention_heads=4, num_hidden_layers=5, vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002)
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 583 |
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", d_model: int = 64, dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
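# Added usage sketch (not part of the original module; with transformers installed
# the class is importable as `from transformers import TimeSeriesTransformerConfig`):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
#   print(config.context_length)  # defaults to prediction_length -> 24
#   print(config.feature_size)    # input_size * len(lags_sequence) + extra features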
| 583 | 1 |
def check_bouncy(n: int) -> bool:
    """A number is bouncy when its digits are neither entirely
    non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Find the least number at which the proportion of bouncy numbers
    reaches `percent` (Project Euler 112)."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 311 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
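# Added usage sketch (not part of the original module; with transformers installed
# the class is importable as `from transformers import ViTConfig`):
#
#   config = ViTConfig(image_size=384)
#   num_patches = (config.image_size // config.patch_size) ** 2
#   print(num_patches)  # 576 patches for a 384x384 input with 16x16 patches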
| 311 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 706 |
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # fairseq stores q/k/v as one fused projection; split it in thirds
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
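# Added sketch (not part of the original script): the fused-projection split used
# above, shown standalone. Fairseq stores query/key/value as one `in_proj_weight`
# of shape (3 * embed_dim, embed_dim); slicing it in thirds recovers the three
# separate projection matrices.
def _demo_split_fused_qkv() -> None:
    import torch

    embed_dim = 4
    in_proj_weight = torch.arange(3 * embed_dim * embed_dim, dtype=torch.float32).reshape(3 * embed_dim, embed_dim)
    q = in_proj_weight[:embed_dim, :]
    k = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v = in_proj_weight[2 * embed_dim :, :]
    assert q.shape == k.shape == v.shape == (embed_dim, embed_dim)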
| 534 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
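    # Note: the `inputs_to_logits_ratio` property above is simply the product of
    # all convolutional strides, i.e. the total downsampling factor of the feature
    # encoder. For the default strides (5, 2, 2, 2, 2, 2, 2) it evaluates to
    # 5 * 2**6 = 320 waveform samples per output frame.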
| 4 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
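    # Replacing `sys.modules[__name__]` with a `_LazyModule` means the heavy,
    # framework-specific submodules declared above are only imported on first
    # attribute access: e.g. `from transformers.models.nezha import NezhaModel`
    # triggers the torch-backed import lazily instead of at package import time.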
| 173 | 0 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
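# jump_search assumes `arr` is sorted and needs O(sqrt(n)) comparisons: it probes
# every floor(sqrt(n))-th element to find the block that may contain x, then
# scans that block linearly. For example:
#   jump_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13)  # -> 4 (index of 13)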
if __name__ == "__main__":
a_ = input("Enter numbers separated by a comma:\n").strip()
a_ = [int(item) for item in user_input.split(",")]
a_ = int(input("Enter the number to be searched:\n"))
a_ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''') | 621 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
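    # Hedged usage sketch -- the file names below are hypothetical, for illustration only:
    #   python convert_efficientformer_checkpoint.py \
    #       --pytorch_model_path efficientformer_l1_300d.pth \
    #       --config_file efficientformer_l1_config.json \
    #       --pytorch_dump_path efficientformer-l1-300 \
    #       --push_to_hub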
| 562 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
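# For reference, the multi-line regex in `check_attribute_being_used` matches
# calls that spread `getattr` over several lines, e.g.:
#     getattr(
#         self.config, "hidden_size", None
#     )
# where the attribute name only appears after the opening parenthesis, so the
# simple substring checks above would miss it.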
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 562 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 129 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
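# Example: a model whose only trainable tensors are two 10x10 weight matrices
# yields count_trainable_parameters(model) == 200; parameters frozen with
# requires_grad=False are filtered out before the sizes are summed.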
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 129 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
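# Conceptually, LocalSGD trades communication for staleness: each worker takes
# `local_sgd_steps` purely local optimizer steps, and model parameters are then
# averaged across workers, instead of all-reducing gradients on every step.
# The `local_sgd.step()` call inside the training loop below counts batches and
# triggers that synchronization.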
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 226 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
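# The table above counts the tilings of Project Euler problem 114: a row of
# `length` grey squares into which red blocks of length >= 3 are placed,
# separated by at least one grey square. Each placement of a leftmost block at
# `block_start` leaves an independent suffix of length
# row_length - block_start - block_length - 1 (the -1 is the mandatory grey
# separator), and the trailing `+= 1` counts the block that sits flush with the
# right edge. solution(50) should therefore give 16475640049.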
if __name__ == "__main__":
print(f'''{solution() = }''')
| 226 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
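    # Hedged usage sketch: with no arguments the config falls back to a Swin
    # backbone and a DETR decoder, mirroring facebook/maskformer-swin-base-ade.
    #   config = MaskFormerConfig()
    #   config.backbone_config.model_type  # "swin"
    #   config.decoder_config.model_type   # "detr"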
| 205 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
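# Odd-even transposition sort is a depth-n sorting network: after n alternating
# even/odd compare-exchange rounds every element is guaranteed to be in its
# final position, which is why `oe_process` runs exactly len(arr) == 10 rounds
# for this ten-element demo.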
| 205 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
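# Added note: this dict-of-lists is consumed by `_LazyModule` below, so importing
# `transformers.models.bert` stays cheap; a heavy backend module such as
# `modeling_bert` is only imported the first time one of its names is accessed,
# e.g. `from transformers.models.bert import BertModel`.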
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 686 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
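# Added note: `if "fc2" and "experts" not in key` parses as
# `"fc2" and ("experts" not in key)`; the non-empty string literal is always
# truthy, so both fc1/fc2 tests reduce to `"experts" not in key`. Expert weights
# therefore keep their fc1/fc2 names and only the dense layers are rewritten.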
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
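# Sketch of the resulting index layout (added; shard names are illustrative):
#   pytorch_model.bin.index.json:
#     {"metadata": {"total_size": ...},
#      "weight_map": {"<parameter name>": "pytorch_model-00001-of-00129.bin", ...}}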
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 686 | 1 |
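# Example invocation (added; the paths are placeholders, the flags are the ones
# registered above):
#   python convert_nllb_moe_checkpoint.py \
#       --nllb_moe_checkpoint_path /path/to/checkpoint_2_300000 \
#       --pytorch_dump_folder_path /path/to/hf-converted-moe-54b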
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the data fields of the two nodes
        node_1.data, node_2.data = node_2.data, node_1.data
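# Added sanity note: the demo below pushes 5..1 (so the list prints 1 2 3 4 5);
# swap_nodes(1, 4) exchanges only the `data` fields, printing 4 2 3 1 5 afterwards.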
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list() | 402 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Transformer-XL model."""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
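    # Usage sketch (added): `attribute_map` routes generic config attribute names
    # to the Transfo-XL specific ones, e.g.
    #   config = TransfoXLConfig(d_model=512)
    #   config.hidden_size  # -> 512, resolved to d_model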
@property
def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
"""simple docstring"""
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 402 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
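    # Added note: run the slow integration test locally with, e.g.,
    #   RUN_SLOW=1 python -m pytest -k TFDPRModelIntegrationTest
    # (the path assumes the standard transformers test layout); the expected slice
    # is tied to the facebook/dpr-question_encoder-single-nq-base weights.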
| 654 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
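# Added note: the default masks above simply flag non-pad positions; with
# pad_token_id=0, input_ids [[5, 7, 0]] produces attention_mask [[1, 1, 0]].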
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
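    # Added note: this slow test downloads google/pegasus-xsum and needs the
    # sentencepiece/tokenizers extras; gate it locally with RUN_SLOW=1.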
| 654 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
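# Added note: the same guarded-import pattern as the other model __init__ files;
# if a backend (vision, torch) is missing, the corresponding symbols are simply
# skipped instead of failing at import time.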
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs (sign-bit XOR trick)."""
    return num1 ^ num2 < 0
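# Worked examples (added): the XOR of two ints is negative exactly when their
# sign bits differ, making this a branch-free sign comparison.
assert different_signs(1, -1) is True
assert different_signs(-5, -7) is False
assert different_signs(0, 3) is False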
if __name__ == "__main__":
import doctest
doctest.testmod()
| 404 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`, merging both into one batch."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
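    # Usage sketch (added; assumes a standard CLIP checkpoint on the Hub):
    #   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    #   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    #   # -> input_ids, attention_mask and pixel_values in one BatchEncoding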
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 12 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map an original YOSO checkpoint key to the transformers naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
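# Example invocation (added; script name and paths are placeholders):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_checkpoint.pt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir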
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path) | 241 | 0 |
"""simple docstring"""
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word into the Baconian cipher (5-character A/B groups)."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-ciphered string back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
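# Round-trip sanity check (added): every letter expands to a 5-character A/B
# group and spaces separate words in the encoded form.
assert decode(encode("hello world")) == "hello world"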
if __name__ == "__main__":
from doctest import testmod
testmod()
| 258 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length n whose fill-count first exceeds one million
    (Project Euler 115)."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
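# Added note: fill_count_functions[n] counts the ways to fill a row of length n
# with black blocks of length >= min_block_length separated by at least one grey
# square; each block placement adds the count for the remaining prefix, plus one
# for the row that places the block at the very start.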
if __name__ == "__main__":
print(F'{solution() = }')
| 258 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
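# Added note: the hard-coded IoU score asserts above are regression checks for
# the sam_vit_h_4b8939 reference weights; other checkpoints yield different values.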
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 537 |
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 1_2,
"""Pm""": 1_5,
"""Em""": 1_8,
"""Zm""": 2_1,
"""Ym""": 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
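# Worked example (added): kilometers -> meters uses 10**(3 - 0), so
# length_conversion(5, "kilometer", "meter") == 5000.
assert length_conversion(5, "kilometer", "meter") == 5000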
if __name__ == "__main__":
from doctest import testmod
testmod() | 142 | 0 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-character bit string into little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 hex digits in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a padded bit string with its length appended."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT within 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 hex characters (bytes)."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
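# Known-answer check (added): MD5 of the empty message is the classic value.
assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"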
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for GPT-NeoX models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 445 | 0 |
"""Convert an HF Diffusers saved pipeline (UNet, VAE, text encoder) to a Stable Diffusion checkpoint."""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to the original Stable Diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
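# Illustrative only (hypothetical key, not read from a real checkpoint): the
# substring swaps compose so resnet-internal names are rewritten first, then the
# block-level prefixes, e.g.
#   "down_blocks.0.resnets.0.norm1.weight"
#     -> "down_blocks.0.resnets.0.in_layers.0.weight"  (unet_conversion_map_resnet)
#     -> "input_blocks.1.0.in_layers.0.weight"         (unet_conversion_map_layer)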
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv weights by adding 1x1 spatial dims
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to the original Stable Diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert a v2.x (OpenCLIP) text encoder, fusing separate q/k/v projections."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # a single character: "q", "k" or "v"
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    # v1.x CLIP text encoders need no renaming beyond the prefix added below
    return text_enc_dict
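# A toy illustration (hypothetical shapes, not from a real model) of the q/k/v
# fusion above: three (dim, dim) projection weights are concatenated along dim 0
# into one (3*dim, dim) in_proj_weight, in q, k, v order:
#
#   q, k, v = torch.randn(4, 4), torch.randn(4, 4), torch.randn(4, 4)
#   in_proj_weight = torch.cat([q, k, v])  # shape (12, 4)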
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCAmelCase__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ = load_file(unet_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCAmelCase__ = load_file(vae_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCAmelCase__ = load_file(text_enc_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCAmelCase__ = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
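# Example invocation (hypothetical paths; the script filename is whatever this
# file is saved as):
#   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors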
| 41 |
"""Tests for the Trainer callback system."""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that records the events that flow through the Trainer."""
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")
    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")
    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")
    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")
    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")
    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")
    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
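# A minimal usage sketch (hypothetical, mirroring what the tests above exercise):
#
#   cb = MyTestTrainerCallback()
#   trainer = Trainer(model, args, train_dataset=train_dataset, callbacks=[cb])
#   trainer.train()
#   print(cb.events)  # e.g. ["on_init_end", "on_train_begin", "on_epoch_begin", ...]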
| 41 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
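# Usage note (a sketch): with the _LazyModule registration above, a statement like
#   from transformers.models.bloom import BloomConfig
# only imports configuration_bloom on first attribute access; static type checkers
# still see the eager imports through the TYPE_CHECKING branch.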
| 718 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
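    # Round-trip sketch (an assumption: Hub access to the pretrained checkpoint)
    # of the encode behavior the tests above pin down:
    #
    #   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    #   tok.encode("Hello World!")  # -> [126, 32, 262, 152, 38, 72, 287]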
| 530 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    """A pipeline that transcribes audio with Whisper and feeds the text to Stable Diffusion."""
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
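# A standalone sketch of the classifier-free guidance update used in __call__
# above: the batched prediction stacks [unconditional, text-conditional] halves,
# and the guided estimate extrapolates away from the unconditional one.
#
#   noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
#
# With guidance_scale == 1.0 this collapses to the plain text-conditional
# prediction, i.e. no extra guidance.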
| 254 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """A fast BERT WordPiece tokenizer backed by HuggingFace's `tokenizers` library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its state disagrees with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS] and [SEP] around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Create segment ids: 0 over the first sequence (and its specials), 1 over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
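# Usage sketch (hypothetical sentence pair): token_type_ids mark segment
# membership for BERT's sentence-pair tasks.
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("How are you?", "I am fine.")
#   # enc["token_type_ids"]: 0 over "[CLS] how are you ? [SEP]", 1 over "i am fine . [SEP]"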
| 254 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """A fast LED tokenizer with extra handling for `global_attention_mask` padding."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCamelCase : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
UpperCamelCase : Dict = add_prefix_space
UpperCamelCase : Dict = pre_tok_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase : Union[str, Any] = '''post_processor'''
UpperCamelCase : int = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
UpperCamelCase : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase : int = tuple(state['''sep'''] )
if "cls" in state:
UpperCamelCase : int = tuple(state['''cls'''] )
UpperCamelCase : Union[str, Any] = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
UpperCamelCase : List[Any] = add_prefix_space
UpperCamelCase : List[Any] = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
UpperCamelCase : Dict = trim_offsets
UpperCamelCase : Any = True
if changes_to_apply:
UpperCamelCase : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
UpperCamelCase : Any = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowercase ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Optional[int] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
UpperCamelCase : Dict = value
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : int = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[str] = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask of zeros from the two sequences passed: LED does not make use of token type ids.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy: " + str(self.padding_side))

        return encoded_inputs
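
# A minimal usage sketch (the checkpoint name and texts are assumptions, not taken
# from this file): LED marks global attention with 1 and local attention with 0,
# which is why `_pad` above extends `global_attention_mask` with -1 for padding
# positions rather than reusing either value.
#
#   from transformers import LEDTokenizerFast
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer(["short text", "a somewhat longer piece of text"], padding=True)
#   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]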
| 643 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
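
        # Note on `replace` above: writing to a `mkstemp` file and then `move`-ing it
        # over the original keeps the edit close to atomic, and `copymode` restores
        # the permissions that the fresh temp file would otherwise lose.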
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)
        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """
    Malus's law: the intensity transmitted through a polarizer equals the incident
    intensity times the squared cosine of the angle between the light's polarization
    direction and the polarizer's axis.
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
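
# A short worked example (values assumed for illustration): an ideal polarizer
# rotated 60 degrees from the light's polarization axis transmits
# cos^2(60 deg) = 0.25 of the incident intensity.
#
#   >>> round(malus_law(100.0, 60.0), 2)
#   25.0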
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """
    Feature for translations with a fixed set of languages per example.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """
    Feature for translations with a variable set of languages per example.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
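
# A usage sketch for the class above (example sentences invented): multiple
# translations for one language are split out and the pairs re-sorted by
# language code before encoding.
#
#   >>> tvl = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   >>> tvl.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   {'language': ('de', 'en', 'fr', 'fr'), 'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}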
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    Knuth-Morris-Pratt substring search in O(n + m):

    1) Preprocess the pattern to find any suffixes that are identical to prefixes;
       this tells us where to continue from if we get a mismatch between a character
       in the pattern and the text.
    2) Step through the text one character at a time, comparing it to a character in
       the pattern and updating our location within the pattern when necessary.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison: for each
    prefix of the pattern, the length of the longest proper prefix that is also
    a suffix.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
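
# A small extension sketch (not part of the original algorithm file): the same
# scan can report the match position instead of a boolean by reusing
# `get_failure_array`.
def kmp_find(pattern: str, text: str) -> int:
    """Return the start index of the first occurrence of `pattern` in `text`, or -1."""
    failure = get_failure_array(pattern)
    i = j = 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return i - j  # index where the match starts
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return -1


# e.g. kmp_find("ABABX", "ABABZABABYABABX") == 10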
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APX algorithm for the minimum vertex cover problem.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []

    # for each node and its adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so -1 * len(value) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
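
# A quick verification sketch (the helper below is an addition, not part of the
# original file): a set is a vertex cover iff every edge has at least one
# endpoint in it.
def is_vertex_cover(graph: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u in graph for v in graph[u])


# e.g. is_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}, {0, 1, 2, 4}) is True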
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier-free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` so that for each column vector only the most probable entries, with total cumulative
        probability `truncation_rate`, are kept. The lowest probabilities that would push the cumulative probability
        above `truncation_rate` are set to zero, i.e. to log(0) = -inf.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
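
# A numeric sketch of `truncate` above (probabilities invented for illustration):
# for one pixel with sorted probabilities [0.5, 0.3, 0.15, 0.05] and
# truncation_rate=0.9, the cumulative sums are [0.5, 0.8, 0.95, 1.0]; an entry
# survives when the cumulative mass *before* it is still below the rate, so
# [0.5, 0.3, 0.15] are kept and 0.05 is sent to log(0).
#
# Pipeline usage sketch (the checkpoint id and prompt are assumptions, not taken
# from this file):
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]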
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
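
# Layout note for `read_in_q_k_v` above: the MSN/timm checkpoint stores the fused
# attention projection as a single (3 * hidden_size, hidden_size) matrix, so rows
# [0, h), [h, 2h) and [2h, 3h) hold the query, key and value weights respectively,
# which is exactly how the three slices above carve it up.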
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training in MSN;
    # it is not needed for the downstream model
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
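
# Example invocation (the script filename and output directory are placeholders):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small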
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
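
# Usage note: because of `_LazyModule`, a statement such as
# `from transformers.models.longt5 import LongT5Config` resolves through
# `_import_structure` above, and the heavy torch/flax submodules are only
# imported the first time one of their classes is actually accessed.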
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """
    Convert a string representation of truth to `1` (true) or `0` (false).

    Raises `ValueError` if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """
    Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """
    Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """
    Tests if `x` is a tensorflow symbolic tensor or not (i.e. not eager). Safe to call even if tensorflow is not
    installed.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
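
# A minimal usage sketch (the subclass below is hypothetical, not part of this
# file): ModelOutput subclasses are dataclasses whose non-None fields can be read
# by key, by attribute, or by index.
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Optional["torch.FloatTensor"] = None
#       logits: "torch.FloatTensor" = None
#
#   out = SampleOutput(logits=some_tensor)
#   out.logits, out["logits"], out[0]  # equivalent views; the unset `loss` is skipped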
class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
    an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
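
# A quick illustration of `flatten_dict` (input invented):
#   flatten_dict({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}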
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")