| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
"""UperNet model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration class for UperNet semantic-segmentation models."""

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

| 90 |
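A minimal usage sketch for the config above (illustrative; assumes the class is exposed as transformers.UperNetConfig):

from transformers import UperNetConfig

config = UperNetConfig()  # falls back to the default ResNet backbone
assert config.model_type == "upernet"
print(config.to_dict()["backbone_config"]["model_type"])  # "resnet"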
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with five random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial division against all primes below 1000, then fall back to rabin_miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(f"Prime number: {num}")
    print(f"is_prime_low_num: {is_prime_low_num(num)}")
| 225 | 0 |
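A quick sanity check for the helpers above (generating a full 1024-bit prime takes a moment, so a smaller keysize is used here):

assert is_prime_low_num(97) and not is_prime_low_num(100)
print(f"32-bit prime: {generate_large_prime(keysize=32)}")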
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Configuration for a composite vision encoder / text decoder model."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 701 |
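A composition sketch for the config class above (illustrative; ViTConfig and BertConfig stand in for any encoder/decoder pair):

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention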
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the raw hidden states of a model for a given text."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (the last hidden state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 52 | 0 |
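A usage sketch for the pipeline above (illustrative; the model name is an arbitrary small checkpoint downloaded on first use):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test")  # nested list shaped [batch][tokens][hidden_size]
print(len(features[0]), len(features[0][0]))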
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated `no_*` flags onto their positive counterparts for backward compatibility.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 643 |
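A minimal instantiation sketch for the arguments class above (illustrative; assumes the usual BenchmarkArguments fields such as models, batch_sizes and sequence_lengths, and requires torch):

args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
print(args.device, args.n_gpu)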
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = '''▁'''
UpperCAmelCase_ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
UpperCAmelCase_ : List[Any] = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
UpperCAmelCase_ : Any = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
UpperCAmelCase_ : Optional[Any] = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
UpperCAmelCase_ : List[Any] = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : List[str] = ["input_ids"]
lowercase : Tuple = VOCAB_FILES_NAMES
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self , _A , _A=None , _A=False , _A="utf8" , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A = None , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , vocab_file=_A , encoding=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_SCREAMING_SNAKE_CASE =do_lower_case
_SCREAMING_SNAKE_CASE =sentencepiece_model_ckpt
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
_SCREAMING_SNAKE_CASE =self.load_vocab(filepath=_A )
else:
_SCREAMING_SNAKE_CASE ={self.sp_model.id_to_piece(_A ): id for id in range(self.sp_model.get_piece_size() )}
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.vocab.items()}
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
if text is None:
return None
_SCREAMING_SNAKE_CASE =self.tokenize(_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ='''''', []
for i, ch in enumerate(_A ):
if ch in self.SP_CHAR_MAPPING:
_SCREAMING_SNAKE_CASE =self.SP_CHAR_MAPPING.get(_A )
else:
_SCREAMING_SNAKE_CASE =unicodedata.normalize('''NFKC''' , _A )
if self.is_whitespace(_A ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_A ) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =normalized_text, [], 0
if self.do_lower_case:
_SCREAMING_SNAKE_CASE =text.lower()
for token in split_tokens:
if token[:1] == "▁":
_SCREAMING_SNAKE_CASE =token[1:]
_SCREAMING_SNAKE_CASE =text[offset:].index(_A ) + offset
_SCREAMING_SNAKE_CASE =start + len(_A )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_SCREAMING_SNAKE_CASE =end
return token_mapping
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return len(self.vocab )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.__dict__.copy()
_SCREAMING_SNAKE_CASE =None
return state
def __setstate__( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_A , _A ) for c in text) )
def UpperCamelCase_ ( self , _A , _A=False , _A=6_4 , _A=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
_SCREAMING_SNAKE_CASE =True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
_SCREAMING_SNAKE_CASE =self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
_SCREAMING_SNAKE_CASE =self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
_SCREAMING_SNAKE_CASE =self.sp_model.EncodeAsPieces(_A )
else:
_SCREAMING_SNAKE_CASE =self.sp_model.SampleEncodeAsPieces(_A , _A , _A )
_SCREAMING_SNAKE_CASE =[]
for pi, piece in enumerate(_A ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_A ) and pi != 0:
new_pieces.append(_A )
continue
else:
continue
_SCREAMING_SNAKE_CASE =0
for i, chunk in enumerate(_A ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_A ) or self.is_punct(_A ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_A )
_SCREAMING_SNAKE_CASE =i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_SCREAMING_SNAKE_CASE =i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_SCREAMING_SNAKE_CASE =i
if len(_A ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(_A )
_SCREAMING_SNAKE_CASE =''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
return self.reverse_vocab.get(_A , self.unk_token )
def UpperCamelCase_ ( self , _A , _A=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase_ ( self , _A , _A=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase_ ( self , _A , _A=None , _A=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def UpperCamelCase_ ( self , _A , _A = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_A ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_A ) + 1) + [1] * (len(_A ) + 3)
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_A ) == 1:
_SCREAMING_SNAKE_CASE =unicodedata.category(_A )
if cat == "Zs":
return True
return False
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ={}
with io.open(_A , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(_A ):
_SCREAMING_SNAKE_CASE =line.rstrip('''\n''' )
_SCREAMING_SNAKE_CASE =int(_A )
return token_to_idx
def UpperCamelCase_ ( self , _A , _A = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
if os.path.isdir(_A ):
_SCREAMING_SNAKE_CASE =os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_SCREAMING_SNAKE_CASE =(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
_SCREAMING_SNAKE_CASE =token_index
writer.write(token + '''\n''' )
index += 1
_SCREAMING_SNAKE_CASE =os.path.join(_A , '''sentencepiece.bpe.model''' )
with open(_A , '''wb''' ) as fi:
_SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto()
fi.write(_A )
return (vocab_file,)
| 255 | 0 |
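A loading sketch for the tokenizer above (illustrative; the checkpoint name is taken from the pretrained map in the file and is not verified here):

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
print(tokenizer.tokenize("ErnieM is multilingual."))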
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322 |
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Brute-force every Caesar shift and rank candidates by chi-squared letter-frequency fit."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 322 | 1 |
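A usage sketch for the decoder above; "crybd cdbsxq" is "short string" Caesar-shifted by ten, and the chi-squared ranking recovers that shift (very short inputs can occasionally mis-rank):

shift, chi_value, plaintext = decrypt_caesar_with_chi_squared("crybd cdbsxq")
print(shift, plaintext)  # 10 short string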
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int = 50 ):
'''simple docstring'''
lowerCAmelCase = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 532 |
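A quick check of the recurrence above: for a row of length five the loops count exactly 15 tilings.

assert solution(5) == 15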
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int = 8 ):
'''simple docstring'''
lowerCAmelCase = ascii_letters + digits + punctuation
return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
i -= len(SCREAMING_SNAKE_CASE )
lowerCAmelCase = i // 3
lowerCAmelCase = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCAmelCase = (
chars_incl
+ random(SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
+ random(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
)
lowerCAmelCase = list(SCREAMING_SNAKE_CASE )
shuffle(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 8 ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCAmelCase = any(char in ascii_uppercase for char in password )
lowerCAmelCase = any(char in ascii_lowercase for char in password )
lowerCAmelCase = any(char in digits for char in password )
lowerCAmelCase = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = int(input("""Please indicate the max length of your password: """ ).strip() )
lowerCAmelCase = input(
"""Please indicate the characters that must be in your password: """ ).strip()
print("""Password generated:""" , password_generator(SCREAMING_SNAKE_CASE ) )
print(
"""Alternative Password generated:""" , alternative_password_generator(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
print("""[If you are thinking of using this passsword, You better save it.]""" )
if __name__ == "__main__":
main()
| 532 | 1 |
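A non-interactive check of the helpers above (password_generator output is random, so its strength is only probabilistic):

pw = password_generator(12)
print(pw, is_strong_password(pw))
assert is_strong_password("Hello-World1")  # upper, lower, digit and punctuation present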
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 702 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 337 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    """Configuration class for Speech2Text models."""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

| 623 |
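A round-trip sketch for the config above (illustrative):

config = Speech2TextConfig(encoder_layers=6)
assert config.hidden_size == config.d_model  # mapped via attribute_map
assert Speech2TextConfig.from_dict(config.to_dict()).encoder_layers == 6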
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism by
    # duplicating the word-to-word query weights into the w2e/e2w/e2e projections
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )

| 623 | 1 |
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Solve the mass-action law n * p = n_i**2 for whichever of the three
    carrier concentrations is passed as 0 (exactly one must be 0).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 54 |
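A worked example for the helper above: with n = 25 and p = 100 (the unknown passed as 0), the intrinsic concentration is sqrt(25 * 100) = 50.

assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)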
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
def a__ ( self ) -> List[str]:
        tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
        raw_input_str = """To ensure a smooth flow of bank resolutions."""
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
        src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin ,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(_snake_case , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
def a__ ( self ) -> Optional[int]:
        src_texts = ["""This is going to be way too long.""" * 1000, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
def a__ ( self ) -> Dict:
        test_str = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        token_ids = self._large_tokenizer(test_str ).input_ids
        self.assertListEqual(
            token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
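# Note on the pattern above: `_LazyModule` replaces this module in `sys.modules`
# so that the heavy, torch/TF-dependent submodules listed in `_import_structure`
# are only imported when one of their names is first accessed, e.g.:
#
#   from transformers import SwinModel  # triggers the deferred torch import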
| 151 |
def solution() -> int:
return [
a * b * (1_000 - a - b)
for a in range(1, 999 )
        for b in range(a, 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
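    # The unique Pythagorean triple with a + b + c = 1_000 is (200, 375, 425),
    # so the printed product should be 200 * 375 * 425 = 31_875_000.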
| 151 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
import cv2
import numpy as np
class HarrisCorner :
    def __init__( self , k : float , window_size : int ) -> None:
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] )->str:
'''simple docstring'''
return str(self.k )
def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : str )->tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = cva.imread(_snake_case , 0 )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = img.shape
__lowerCAmelCase : list[list[int]] = []
__lowerCAmelCase : Any = img.copy()
__lowerCAmelCase : int = cva.cvtColor(_snake_case , cva.COLOR_GRAY2RGB )
__lowerCAmelCase , __lowerCAmelCase : Tuple = np.gradient(_snake_case )
__lowerCAmelCase : Optional[int] = dx**2
__lowerCAmelCase : Union[str, Any] = dy**2
__lowerCAmelCase : Tuple = dx * dy
__lowerCAmelCase : Dict = 0.04
__lowerCAmelCase : Optional[Any] = self.window_size // 2
for y in range(_snake_case , h - offset ):
for x in range(_snake_case , w - offset ):
__lowerCAmelCase : str = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase : Any = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase : Optional[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase : int = (wxx * wyy) - (wxy**2)
__lowerCAmelCase : int = wxx + wyy
__lowerCAmelCase : List[str] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 240 | 0 |
"""simple docstring"""
def solution( n :int = 4_000_000 ):
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
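    # Sanity check: the even-valued Fibonacci terms not exceeding 4_000_000 are
    # 2, 8, 34, 144, 610, 2_584, 10_946, 46_368, 196_418, 832_040 and 3_524_578,
    # which sum to the known Project Euler #2 answer below.
    assert solution() == 4_613_732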
| 49 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : List[str] = 1_6
_lowerCamelCase : Any = 3_2
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : int = 16 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_lowerCAmelCase : Tuple = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase_ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : Dict = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : Union[str, Any] = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : str = 8
else:
_lowerCAmelCase : str = None
return tokenizer.pad(
UpperCamelCase_ , padding="""longest""" , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_lowerCAmelCase : Dict = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
_lowerCAmelCase : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase : Dict = mocked_dataloaders # noqa: F811
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase_ ) == "1":
_lowerCAmelCase : str = 2
# Initialize accelerator
_lowerCAmelCase : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : List[str] = config["""lr"""]
_lowerCAmelCase : Dict = int(config["""num_epochs"""] )
_lowerCAmelCase : Dict = int(config["""seed"""] )
_lowerCAmelCase : Any = int(config["""batch_size"""] )
_lowerCAmelCase : Optional[int] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=UpperCamelCase_ )
def inner_training_loop(UpperCamelCase_ : List[Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(UpperCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : int = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : List[Any] = AdamW(params=model.parameters() , lr=UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase : str = get_dataloaders(UpperCamelCase_ , UpperCamelCase_ )
# Instantiate scheduler
_lowerCAmelCase : Any = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ )
_lowerCAmelCase : int = outputs.loss
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Any = model(**UpperCamelCase_ )
_lowerCAmelCase : str = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : List[str] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=UpperCamelCase_ , references=UpperCamelCase_ , )
_lowerCAmelCase : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , UpperCamelCase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_lowerCAmelCase : Any = parser.parse_args()
_lowerCAmelCase : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
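# A reduced sketch of the decorator's semantics (this mirrors how
# `find_executable_batch_size` is documented to behave, not its internals): the
# wrapped function is retried with a halved `batch_size` whenever it raises an
# out-of-memory error, until a batch size fits in memory.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def loop(batch_size):
#       ...  # build dataloaders with `batch_size`, then train
#
#   loop()  # called with no arguments; the decorator injects `batch_size`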
| 429 | 0 |
def multiplication_table( number , number_of_terms ) -> str:
    '''simple docstring'''
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
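# Expected output for the call above:
# 5 * 1 = 5
# 5 * 2 = 10
# ...
# 5 * 10 = 50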
| 717 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 561 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc( model_doc ) -> list:
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
    # Sort
    return sorted(new_doc, key=lambda s: s['title'].lower() )
def check_model_doc( overwrite=False ) -> None:
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 640 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a : str = 16
a : Union[str, Any] = 32
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = 16 ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case_ = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=__UpperCAmelCase, max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ = datasets.map(
__UpperCAmelCase, batched=__UpperCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ = 16
elif accelerator.mixed_precision != "no":
snake_case_ = 8
else:
snake_case_ = None
return tokenizer.pad(
__UpperCAmelCase, padding='''longest''', max_length=__UpperCAmelCase, pad_to_multiple_of=__UpperCAmelCase, return_tensors='''pt''', )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets['''train'''], shuffle=__UpperCAmelCase, collate_fn=__UpperCAmelCase, batch_size=__UpperCAmelCase )
snake_case_ = DataLoader(
tokenized_datasets['''validation'''], shuffle=__UpperCAmelCase, collate_fn=__UpperCAmelCase, batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a : Optional[Any] = mocked_dataloaders # noqa: F811
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', __UpperCAmelCase ) == "1":
snake_case_ = 2
# New Code #
snake_case_ = int(args.gradient_accumulation_steps )
snake_case_ = int(args.local_sgd_steps )
# Initialize accelerator
snake_case_ = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=__UpperCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config['''lr''']
snake_case_ = int(config['''num_epochs'''] )
snake_case_ = int(config['''seed'''] )
snake_case_ = int(config['''batch_size'''] )
snake_case_ = evaluate.load('''glue''', '''mrpc''' )
set_seed(__UpperCAmelCase )
snake_case_ ,snake_case_ = get_dataloaders(__UpperCAmelCase, __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=__UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ = AdamW(params=model.parameters(), lr=__UpperCAmelCase )
# Instantiate scheduler
snake_case_ = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(__UpperCAmelCase ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = accelerator.prepare(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# Now we train the model
for epoch in range(__UpperCAmelCase ):
model.train()
with LocalSGD(
accelerator=__UpperCAmelCase, model=__UpperCAmelCase, local_sgd_steps=__UpperCAmelCase, enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCAmelCase ):
snake_case_ = model(**__UpperCAmelCase )
snake_case_ = output.loss
accelerator.backward(__UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**__UpperCAmelCase )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ ,snake_case_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__UpperCAmelCase, references=__UpperCAmelCase, )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:", __UpperCAmelCase )
def __magic_name__ ( ) -> str:
'''simple docstring'''
snake_case_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=__UpperCAmelCase, default=__UpperCAmelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''', type=__UpperCAmelCase, default=1, help='''The number of minibatches to be ran before gradients are accumulated.''', )
parser.add_argument(
'''--local_sgd_steps''', type=__UpperCAmelCase, default=8, help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
snake_case_ = parser.parse_args()
snake_case_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCAmelCase, __UpperCAmelCase )
if __name__ == "__main__":
main()
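# The LocalSGD pattern above, reduced to its skeleton: gradients are applied
# locally every step, but parameters are only averaged across workers every
# `local_sgd_steps` optimizer steps, cutting communication at the cost of a
# little staleness.
#
#   with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
#       for batch in train_dataloader:
#           ...               # forward / backward / optimizer.step()
#           local_sgd.step()  # counts steps; averages parameters every 8th step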
| 640 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
'''simple docstring'''
A_ = ['pixel_values']
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = 1 / 2_55 , A_ = True , A_ = None , A_ = True , **A_ , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 2_24}
_lowerCamelCase = get_size_dict(A_ , default_to_square=A_ )
_lowerCamelCase = crop_size if crop_size is not None else {'''height''': 2_56, '''width''': 2_56}
_lowerCamelCase = get_size_dict(A_ , param_name='''crop_size''' )
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = resample
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_center_crop
_lowerCamelCase = crop_size
_lowerCamelCase = do_flip_channel_order
def UpperCamelCase_ ( self , A_ , A_ , A_ = PIL.Image.BILINEAR , A_ = None , **A_ , ) -> np.ndarray:
"""simple docstring"""
_lowerCamelCase = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
_lowerCamelCase = get_resize_output_image_size(A_ , size=size['''shortest_edge'''] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
"""simple docstring"""
_lowerCamelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> np.ndarray:
"""simple docstring"""
return flip_channel_order(A_ , data_format=A_ )
def UpperCamelCase_ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
"""simple docstring"""
_lowerCamelCase = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase = resample if resample is not None else self.resample
_lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCamelCase = size if size is not None else self.size
_lowerCamelCase = get_size_dict(A_ , default_to_square=A_ )
_lowerCamelCase = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase = get_size_dict(A_ , param_name='''crop_size''' )
_lowerCamelCase = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
_lowerCamelCase = [to_numpy_array(A_ ) for image in images]
if do_resize:
_lowerCamelCase = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
_lowerCamelCase = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
_lowerCamelCase = [self.rescale(image=A_ , scale=A_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCamelCase = [self.flip_channel_order(image=A_ ) for image in images]
_lowerCamelCase = [to_channel_dimension_format(A_ , A_ ) for image in images]
_lowerCamelCase = {'''pixel_values''': images}
return BatchFeature(data=A_ , tensor_type=A_ )
def UpperCamelCase_ ( self , A_ , A_ = None ) -> int:
"""simple docstring"""
_lowerCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(A_ ):
_lowerCamelCase = target_sizes.numpy()
_lowerCamelCase = []
for idx in range(len(A_ ) ):
_lowerCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ )
_lowerCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
_lowerCamelCase = logits.argmax(dim=1 )
_lowerCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 638 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( DPTImageProcessor ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 638 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = """bart"""
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_s2s_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
        wikiaab_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    """simple docstring"""
    elia = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
    elia_train = elia['''train_eli5''']
    elia_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training( question, n_results=10 ):
    """simple docstring"""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model )
    D , I = eli5_train_q_index.search(q_rep, n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support( question, source="wiki40b", method="dense", n_results=10 ):
    """simple docstring"""
    if source == "none":
        support_doc , hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question, es_client, index_name='''english_wiki40b_snippets_100w''', n_results=n_results, )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question, support_doc )
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def answer_question( question_doc, sas_model, sas_tokenizer, min_len=64, max_len=2_56, sampling=False, n_beams=2, top_p=0.95, temp=0.8 ):
    """simple docstring"""
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=10_24, device='''cuda:0''', )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_a = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
_a = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_a = action_list.index(action_st)
_a = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_a = show_type == """Show full text of passages"""
else:
_a = 3
_a = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_a = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_a = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_a = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_a = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_a = None
# start main text
_a = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_a = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_a = st.text_input("""Enter your question here:""", """""")
else:
_a = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_a = support_list[:10]
_a = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_a , _a = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_a , _a = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
_a = sec_titles.split(""" & """)
_a = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_a = find_nearest_training(question)
_a = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_a = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 19 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos ,(list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
class _UpperCamelCase ( BaseImageProcessor ):
'''simple docstring'''
_snake_case = ['''pixel_values''']
def __init__( self , a_ = True , a_ = None , a_ = PILImageResampling.BILINEAR , a_ = True , a_ = None , a_ = True , a_ = 1 / 2_5_5 , a_ = True , a_ = None , a_ = None , **a_ , ) -> None:
super().__init__(**a_ )
lowercase : Optional[int] = size if size is not None else {"shortest_edge": 2_2_4}
lowercase : Optional[int] = get_size_dict(a_ , default_to_square=a_ )
lowercase : str = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
lowercase : Any = get_size_dict(a_ , param_name="crop_size" )
lowercase : int = do_resize
lowercase : List[str] = size
lowercase : Dict = do_center_crop
lowercase : int = crop_size
lowercase : Any = resample
lowercase : Tuple = do_rescale
lowercase : int = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self , a_ , a_ , a_ = PILImageResampling.BILINEAR , a_ = None , **a_ , ) -> np.ndarray:
lowercase : Tuple = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" in size:
lowercase : Tuple = get_resize_output_image_size(a_ , size["shortest_edge"] , default_to_square=a_ )
elif "height" in size and "width" in size:
lowercase : Tuple = (size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
lowercase : str = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ , a_ = None , **a_ , ) -> Optional[Any]:
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ , a_ , a_ = None , **a_ , ) -> np.ndarray:
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def a__ ( self , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowercase : List[Any] = to_numpy_array(a_ )
if do_resize:
lowercase : Tuple = self.resize(image=a_ , size=a_ , resample=a_ )
if do_center_crop:
lowercase : Tuple = self.center_crop(a_ , size=a_ )
if do_rescale:
lowercase : List[Any] = self.rescale(image=a_ , scale=a_ )
if do_normalize:
lowercase : Optional[Any] = self.normalize(image=a_ , mean=a_ , std=a_ )
lowercase : Any = to_channel_dimension_format(a_ , a_ )
return image
    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 425 |
'''simple docstring'''
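# Project Euler problem 55: a Lychrel number is one assumed never to form a
# palindrome through the iterative reverse-and-add process; following the
# problem statement, 50 iterations is used as the cutoff.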
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    # Count candidate Lychrel numbers below `limit`: numbers that never
    # produce a palindrome within 50 reverse-and-add iterations.
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        candidate = num
        while iterations < 50:
            candidate = sum_reverse(candidate)
            iterations += 1
            if is_palindrome(candidate):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 425 | 1 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
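# Decodes an in-memory audio payload by piping it through an ffmpeg subprocess
# and reinterpreting ffmpeg's stdout as mono float32 samples.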
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Read an audio payload through ffmpeg, returning mono float32 samples."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
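# Captures live audio from the default microphone; the ffmpeg input backend
# (alsa / avfoundation / dshow) is selected from the operating system.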
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw microphone chunks of `chunk_length_s` seconds captured via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
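# Wraps the raw microphone stream with left/right striding so consumers receive
# overlapping numpy chunks; chunks that arrive too late are skipped to keep the
# stream (near) real time.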
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks decoded to numpy, ready for streaming ASR."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
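# Accumulates raw bytes and re-cuts them into fixed-size, optionally
# overlapping chunks; the `stride` bookkeeping lets downstream consumers know
# how much of each chunk is overlap.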
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len` with
    `stride` overlap. With `stream=True`, partial chunks are yielded as they arrive.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper that spawns ffmpeg and yields its stdout in `buflen`-sized reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 277 | """simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the shared-data JSON out of an Instagram profile page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
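# Thin scraper around the public profile page. The position of the relevant
# <script> tag in Instagram's HTML changes over time, hence the fallback
# between script indices in get_json below.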
class InstagramUser:
    """
    Class interface to the user information scraped from an Instagram profile page.
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the parsed user dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Smoke test for the InstagramUser class against the GitHub profile."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 277 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
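# Standard transformers lazy-import module: public names are registered in
# `_import_structure` and only resolved on first attribute access via
# `_LazyModule`, so optional backends are never imported eagerly.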
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 400 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
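# Fast tests run the pipeline with a small randomly initialised UNet; the slow
# integration tests load a pretrained checkpoint. The hard-coded expected
# slices pin down numerical reproducibility across releases.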
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_pndm_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 10 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 627 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 706 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
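# Builds a SwinConfig matching the SimMIM pretraining checkpoints
# (192x192 inputs; window size and widths depend on the model variant).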
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
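# Maps parameter names from the original SimMIM checkpoint layout onto the
# Hugging Face Swin naming scheme.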
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused qkv tensor into separate query / key / value
            # tensors; the target key names follow the HF Swin layout also
            # produced by rename_key above.
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # Forward pass as a smoke check; printing the output keys (rather than the
    # logits tensor) keeps this consistent with `outputs.keys()` below.
    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 211 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 278 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
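# Safety head in the style of the DeepFloyd IF safety checker: two linear
# probes on top of a CLIP vision tower flag NSFW content and watermarks, and
# flagged images are replaced with black images.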
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 278 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 287 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 287 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
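# Configuration for GPTSAN-japanese, a Switch-Transformer style
# mixture-of-experts language model; the class-level attribute names below
# follow the usual PretrainedConfig conventions.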
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 37 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
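# Converts an original (m)LUKE checkpoint to the Hugging Face LukeForMaskedLM
# format: extends the token and entity vocabularies, duplicates the [MASK]
# entity as [MASK2], and verifies the converted model against known outputs.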
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism:
    # copy the plain query projections into the word-to-entity, entity-to-word
    # and entity-to-entity variants used by LUKE's entity-aware attention.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
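# Parses the original JSONL entity vocabulary into a flat mapping from
# "language:entity_name" (or a bare special token) to the entity id.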
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 685 | 0 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 510 |
import string
from math import logaa
def _UpperCAmelCase ( A , A ):
'''simple docstring'''
UpperCAmelCase__ =document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
UpperCAmelCase__ =document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _UpperCAmelCase ( A , A ):
'''simple docstring'''
UpperCAmelCase__ =corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCAmelCase__ =corpus_without_punctuation.split("\n" )
UpperCAmelCase__ =term.lower()
return (len([doc for doc in docs if term in doc] ), len(A ))
def _UpperCAmelCase ( A , A , A=False ):
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def _UpperCAmelCase ( A , A ):
'''simple docstring'''
return round(tf * idf , 3 )
| 510 | 1 |
lowerCamelCase_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def __magic_name__ ( __a : bytes ):
'''simple docstring'''
if not isinstance(__a , __a ):
UpperCamelCase__ = f"a bytes-like object is required, not \'{data.__class__.__name__}\'"
raise TypeError(__a )
UpperCamelCase__ = """""".join(bin(__a )[2:].zfill(8 ) for byte in data )
UpperCamelCase__ = len(__a ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase__ = b"""=""" * ((6 - len(__a ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__a ) % 6)
else:
UpperCamelCase__ = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__a ) , 6 ) ).encode()
+ padding
)
def __magic_name__ ( __a : str ):
'''simple docstring'''
if not isinstance(__a , __a ) and not isinstance(__a , __a ):
UpperCamelCase__ = (
"""argument should be a bytes-like object or ASCII string, """
f"not \'{encoded_data.__class__.__name__}\'"
)
raise TypeError(__a )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__a , __a ):
try:
UpperCamelCase__ = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
UpperCamelCase__ = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__a ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCamelCase__ = encoded_data[:-padding]
UpperCamelCase__ = """""".join(
bin(B64_CHARSET.index(__a ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCamelCase__ = """""".join(
bin(B64_CHARSET.index(__a ) )[2:].zfill(6 ) for char in encoded_data )
UpperCamelCase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__a ) , 8 )
]
return bytes(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 513 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int | str ):
UpperCAmelCase = str(SCREAMING_SNAKE_CASE )
return n == n[::-1]
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int = 100_0000 ):
UpperCAmelCase = 0
for i in range(1 , SCREAMING_SNAKE_CASE ):
if is_palindrome(SCREAMING_SNAKE_CASE ) and is_palindrome(bin(SCREAMING_SNAKE_CASE ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 447 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 717 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowercase : Any = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
lowercase : Tuple = parser.parse_args()
lowercase : str = 'cpu'
lowercase : List[Any] = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
lowercase : Any = 'path-to-your-trained-model'
lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowercase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowercase : Dict = pipe.to(device)
# to channels last
lowercase : int = pipe.unet.to(memory_format=torch.channels_last)
lowercase : int = pipe.vae.to(memory_format=torch.channels_last)
lowercase : Dict = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowercase : List[str] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowercase : Any = torch.randn(2, 4, 64, 64)
lowercase : Union[str, Any] = torch.rand(1) * 999
lowercase : List[Any] = torch.randn(2, 77, 768)
lowercase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
lowercase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowercase : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowercase : Dict = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowercase : Dict = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowercase : Any = 666
lowercase : int = torch.Generator(device).manual_seed(seed)
lowercase : List[str] = {'generator': generator}
if args.steps is not None:
lowercase : List[str] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowercase : Dict = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png') | 94 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["image_processor", "tokenizer"]
UpperCAmelCase = "AutoImageProcessor"
UpperCAmelCase = "AutoTokenizer"
def __init__( self : Union[str, Any] , _A : Dict , _A : Union[str, Any] ):
super().__init__(_A , _A )
_UpperCamelCase = self.image_processor
def __call__( self : int , _A : Tuple=None , _A : Tuple=None , _A : List[Any]=None , **_A : Dict ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase = self.tokenizer(_A , return_tensors=_A , **_A )
if images is not None:
_UpperCamelCase = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None and images is not None:
_UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) , tensor_type=_A )
def UpperCamelCase_ ( self : Any , *_A : List[Any] , **_A : str ):
return self.tokenizer.batch_decode(*_A , **_A )
def UpperCamelCase_ ( self : Any , *_A : int , **_A : Dict ):
return self.tokenizer.decode(*_A , **_A )
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return ["input_ids", "attention_mask", "pixel_values"]
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Optional[int] = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 | 0 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCamelCase ={1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class A ( nn.Module ):
"""simple docstring"""
def __init__( self , __lowerCAmelCase ):
super().__init__()
UpperCamelCase_ : List[str] = torchvision.models.resnetaaa(pretrained=__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = list(model.children() )[:-2]
UpperCamelCase_ : Union[str, Any] = nn.Sequential(*__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _UpperCAmelCase ( self , __lowerCAmelCase ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
UpperCamelCase_ : int = self.pool(self.model(__lowerCAmelCase ) )
UpperCamelCase_ : Optional[int] = torch.flatten(__lowerCAmelCase , start_dim=2 )
UpperCamelCase_ : Optional[Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class A ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : Dict = [json.loads(__lowerCAmelCase ) for l in open(__lowerCAmelCase )]
UpperCamelCase_ : Union[str, Any] = os.path.dirname(__lowerCAmelCase )
UpperCamelCase_ : Any = tokenizer
UpperCamelCase_ : int = labels
UpperCamelCase_ : List[str] = len(__lowerCAmelCase )
UpperCamelCase_ : Dict = max_seq_length
UpperCamelCase_ : int = transforms
def __len__( self ):
return len(self.data )
def __getitem__( self , __lowerCAmelCase ):
UpperCamelCase_ : Union[str, Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=__lowerCAmelCase ) )
UpperCamelCase_ : Tuple = sentence[0], sentence[1:-1], sentence[-1]
UpperCamelCase_ : List[Any] = sentence[: self.max_seq_length]
UpperCamelCase_ : int = torch.zeros(self.n_classes )
UpperCamelCase_ : Optional[int] = 1
UpperCamelCase_ : str = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
UpperCamelCase_ : List[str] = self.transforms(__lowerCAmelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def snake_case ( a_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = [len(row["""sentence"""] ) for row in batch]
UpperCamelCase_ : Union[str, Any] = len(a_ ), max(a_ )
UpperCamelCase_ : Any = torch.zeros(a_ , a_ , dtype=torch.long )
UpperCamelCase_ : Optional[int] = torch.zeros(a_ , a_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a_ , a_ ) ):
UpperCamelCase_ : Optional[int] = input_row["""sentence"""]
UpperCamelCase_ : str = 1
UpperCamelCase_ : Dict = torch.stack([row["""image"""] for row in batch] )
UpperCamelCase_ : Dict = torch.stack([row["""label"""] for row in batch] )
UpperCamelCase_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch] )
UpperCamelCase_ : int = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
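# A minimal wiring sketch for the helpers above. The tokenizer checkpoint and the
# "train.jsonl" path are illustrative assumptions, not part of the original script.
def _example_build_loader():
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
    dataset = JsonlDataset(
        "train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=128
    )
    return DataLoader(dataset, batch_size=8, collate_fn=collate_fn)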
| 713 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
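# Illustrative shell invocation of the subcommand registered above; the input and
# output file names are assumptions:
#   transformers-cli run --task text-classification --input reviews.csv \
#       --column review --output predictions.csv --device -1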
| 543 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 67 |
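# A minimal sketch using the config classes defined above (all values are the defaults):
#   config = Data2VecVisionConfig()
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values"]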
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
| 67 | 1 |
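# Illustrative way to run the test module above (the repository path is an assumption):
#   pytest tests/models/deta/test_image_processing_deta.py -k "image_processor_properties"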
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 701 |
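# A minimal sketch of the verbosity API defined above, assuming this module lives at
# datasets/utils/logging.py as in the `datasets` library:
#   from datasets.utils import logging as ds_logging
#   ds_logging.set_verbosity_info()
#   logger = ds_logging.get_logger(__name__)
#   logger.info("now visible at INFO level")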
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
| 698 | 0 |
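# Worked check for quadratic_roots() above: for 5*x**2 + 6*x + 1 = 0 the
# discriminant is 6**2 - 4*5*1 = 16, so the roots are (-6 ± 4) / 10,
# i.e. -0.2 and -1.0.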
def solution(limit: int = 1_000_000) -> int:
    """
    Counts the values of n below the limit for which x**2 - y**2 - z**2 = n has
    exactly ten solutions, where x, y, z are consecutive terms of a decreasing
    arithmetic progression (Project Euler problem 135).
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
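# Why the loop conditions above work: write the decreasing progression as
# x = a + d, y = a, z = a - d. Then
#   n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a),
# so for each divisor a of n (the "first_term"), d = (n / a + a) / 4 must be a
# positive integer, with a > d so that z > 0 and a < 4*d so that n > 0.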
| 390 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 227 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute Pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
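# The series summed above is Chudnovsky's:
#   1 / pi = 12 * sum_{k >= 0} (-1)**k * (6k)! * (13591409 + 545140134 * k)
#            / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# Each term contributes roughly 14 additional correct digits, which is why the
# code runs ceil(precision / 14) iterations.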
| 704 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
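# A minimal usage sketch for the tokenizer above (checkpoint taken from the map above):
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer.encode("Hello world")  # [CLS] ... [SEP] with special tokens added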
| 424 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
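# A minimal usage sketch (checkpoint taken from the map above):
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   batch = tokenizer(["Summarize the following document: ..."], return_tensors="pt")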
| 229 |
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 229 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # split the fused qkv weight into separate query/key/value weights
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 492 |
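# Illustrative invocation of the conversion above (the script file name and the
# output directory are assumptions):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-converted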
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 492 | 1 |
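# A minimal sketch using the classes defined above (default values):
#   config = DeiTConfig()
#   onnx_config = DeiTOnnxConfig(config)
#   dict(onnx_config.inputs)  # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}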
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 202 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
return self.model(**SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
SCREAMING_SNAKE_CASE__ : Any = self(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = outputs[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.trainer.lr_schedulers[0]["""scheduler"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.hparams
SCREAMING_SNAKE_CASE__ : Optional[int] = processors[args.task]()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
SCREAMING_SNAKE_CASE__ : List[str] = self._feature_file(SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , SCREAMING_SNAKE_CASE__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
SCREAMING_SNAKE_CASE__ : str = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> DataLoader:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = """dev""" if mode == """test""" else mode
SCREAMING_SNAKE_CASE__ : Optional[int] = self._feature_file(SCREAMING_SNAKE_CASE__ )
logger.info("""Loading features from cached file %s""" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
SCREAMING_SNAKE_CASE__ : str = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , batch_size=SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
SCREAMING_SNAKE_CASE__ : List[Any] = self(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = outputs[:2]
SCREAMING_SNAKE_CASE__ : int = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE__ : List[str] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
SCREAMING_SNAKE_CASE__ : Any = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
SCREAMING_SNAKE_CASE__ : int = np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.squeeze(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE__ : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE__ : Tuple = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
SCREAMING_SNAKE_CASE__ : int = dict(results.items() )
SCREAMING_SNAKE_CASE__ : str = results
return ret, preds_list, out_label_list
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self._eval_end(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self._eval_end(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
parser.add_argument(
"""--max_seq_length""" , default=1_28 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=SCREAMING_SNAKE_CASE__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
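# Illustrative launch of the script above; the script file name and the flags that
# come from add_generic_args / BaseTransformer are assumptions here:
#   python run_pl_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC --output_dir ./mrpc-out --do_train --do_predict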
| 223 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : str ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : str = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : List[str] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config(**_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_lowerCAmelCase : str = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase : Any = dummy_past_residuals[:]
_lowerCAmelCase : int = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_lowerCAmelCase : List[str] = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : Tuple = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_lowerCAmelCase : Optional[int] = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
'''simple docstring'''
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
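    # Note on full_loop above: PNDM samples in two phases. set_timesteps()
    # populates scheduler.prk_timesteps (a few Runge-Kutta warm-up steps that
    # build the history of past model outputs) and scheduler.plms_timesteps
    # (the remaining linear-multistep steps). As a sketch:
    #
    #     scheduler = PNDMScheduler()
    #     scheduler.set_timesteps(10)
    #     scheduler.prk_timesteps   # warm-up timesteps, with repeated entries
    #     scheduler.plms_timesteps  # remaining multistep timesteps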
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() failed when num_inference_steps was a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
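# Minimal round-trip sketch against the public diffusers API, mirroring the
# save_config / from_pretrained check in check_over_configs above (the
# asserted field is illustrative):
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = PNDMScheduler.from_pretrained(tmpdirname)
    assert reloaded.config.num_train_timesteps == 1000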
| 196 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter, usually in the range [0.04, 0.06]
        window_size: side length of the neighbourhood summed for each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # Harris free parameter, validated in the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the corner response; can be tuned.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
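# For comparison, OpenCV ships a vectorized Harris response; a sketch of the
# equivalent call (blockSize plays the role of window_size, ksize is the
# Sobel aperture, and the 0.01 threshold factor is illustrative):
#
#     img = cv2.imread("path_to_image", 0)
#     response = cv2.cornerHarris(np.float32(img), blockSize=3, ksize=3, k=0.04)
#     corners = np.argwhere(response > 0.01 * response.max())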
| 196 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
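# The _LazyModule machinery above is transformers-internal; the same deferred
# import can be sketched with plain PEP 562 module-level __getattr__
# (attribute and submodule names here are illustrative):
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "ChineseCLIPProcessor":
#             return importlib.import_module(".processing_chinese_clip", __name__).ChineseCLIPProcessor
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")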
| 507 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
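# Usage sketch for the processor above (class name as reconstructed here).
# With the defaults, a 300x400 image is resized so its short side becomes
# int(256 / 224 * 224) = 256, center-cropped to 224x224, then rescaled and
# normalized:
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, (3, 300, 400), dtype=np.uint8)
    processor = LevitImageProcessor()
    batch = processor(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)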
| 507 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        # Score every candidate span up to max_answer_length, then greedily keep
        # the highest-scoring non-overlapping spans.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
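# End-to-end usage sketch for the reader tokenizer (mirrors the example in
# the transformers documentation; checkpoint names come from the maps above):
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(predicted_spans[0].text)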
| 715 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler #1).

    >>> solution(10)
    23
    """
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
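# Inclusion-exclusion gives the same answer in O(1): add the multiples of 3
# and of 5, then subtract the multiples of 15 that were counted twice.
# (The function name below is ours, for illustration.)
def solution_closed_form(n: int = 1000) -> int:
    def sum_of_multiples(k: int) -> int:
        m = (n - 1) // k  # count of positive multiples of k below n
        return k * m * (m + 1) // 2

    return sum_of_multiples(3) + sum_of_multiples(5) - sum_of_multiples(15)


assert solution_closed_form(10) == 23  # 3 + 5 + 6 + 9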
| 606 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
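# Sanity check of the layout implied by the distribution config above: with
# 8 processes per host and 4 model-parallel partitions, each model replica
# spans 4 GPUs, so DDP runs 2 replicas per host (times instance_count hosts).
PROCESSES_PER_HOST, PARTITIONS = 8, 4
assert PROCESSES_PER_HOST // PARTITIONS == 2  # replicas per host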
| 402 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
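# Quick sketch: the default config implies (224 / 16) ** 2 = 196 patch tokens
# per image (plus the class and distillation tokens that DeiT prepends):
#
#     config = DeiTConfig()
#     num_patches = (config.image_size // config.patch_size) ** 2
#     assert num_patches == 196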
| 141 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, we want the dataset to be "infinite-ish", so we stop with a
    # small probability after each element, up to max_length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def A ( self : Dict ) -> Tuple:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
lowercase_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [[], []]
self.check_batch_sampler_shards(A , A )
def A ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def A ( self : str ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def A ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
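
# Sharding sketch (assumption, mirroring the dataloader tests above): with
# num_processes=2, batch_size=4 and split_batches=False, IterableDatasetShard
# round-robins whole batches across processes — process 0 yields items
# [0:4], [8:12], ... and process 1 yields [4:8], [12:16], ... — which is
# exactly the interleaving that check_iterable_dataset_shards reassembles
# into `observed` before comparing against the reference list.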
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    # a sample audio file hosted on the Hub, used by the integration test below
    audio_path = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(audio_path)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
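
# Note (assumption): the (1, 527)-shaped logits checked above correspond to
# the 527 AudioSet classes the "MIT/ast-finetuned-audioset-10-10-0.4593"
# checkpoint was fine-tuned on; the "10-10" in the name encodes the
# frequency/time patch strides used during that fine-tuning.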
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path) | 665 |
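
# Hedged usage sketch (the script name and paths are illustrative placeholders):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path ./yoso.ckpt \
#       --config_file ./yoso_config.json \
#       --pytorch_dump_path ./yoso-hf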
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
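
# Worked check (follows directly from the recurrence above): the first
# perimeters produced are 16, 50, 196, 722 — the almost-equilateral Heronian
# triangles (5, 5, 6), (17, 17, 16), (65, 65, 66), (241, 241, 240) — so
# solution(1000) == 0 + 16 + 50 + 196 + 722 == 984.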
if __name__ == "__main__":
print(F"{solution() = }") | 526 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
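
# Note (assumption): channels_last plus bf16 autocast is the usual IPEX recipe
# for conv-heavy UNets on recent Xeon CPUs, and passing `sample_input` lets
# ipex.optimize trace the UNet with realistic latent/timestep/embedding shapes.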
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
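
# Worked example (derived from the function above): decimal_to_fraction(6.25)
# gives numerator 625 and denominator 100; the Euclidean loop finds gcd 25,
# so the reduced result is (25, 4). Note that the final "78td" demo below
# raises ValueError on purpose, since it is not a valid number.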
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
from ...configuration_utils import PretrainedConfig
a_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
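
# Minimal usage sketch (hedged; the WTQ-style aggregation labels are only an
# illustration):
#   config = TapasConfig(
#       num_aggregation_labels=4,
#       aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
#   )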
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
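
# Numeric sanity check (assumption: SI units throughout): two plates of area
# 1e-4 m^2 separated by 1e-6 m give casimir_force(0, 1e-4, 1e-6) ≈
# {"force": 1.30e-07} N, i.e. F = (ħ · c · π² · A) / (240 · d⁴).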
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
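
# Note (assumption): a Bark voice preset bundles three prompt arrays — a 1-D
# "semantic_prompt", a 2-codebook "coarse_prompt" and an 8-codebook
# "fine_prompt" — which is why test_speaker_embeddings builds exactly those
# shapes before round-tripping them through the processor.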
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
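
# Expected output (sketch): pushing 5..1 builds "1 2 3 4 5"; swap_nodes(1, 4)
# exchanges only the data fields of the two matching nodes, so the second
# print shows "4 2 3 1 5".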
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
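
# Sanity check (assumption: silicon at 300 K with n_i ≈ 1.5e10 cm^-3):
# builtin_voltage(1e17, 1e17, 1.5e10) ≈ 0.81 V — the familiar built-in
# potential of a symmetric Si p-n junction, V_bi = (kT/q) · ln(N_d·N_a / n_i²).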
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
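
# Example of the comment format parsed above (hedged sketch; the object path
# and replacement names are illustrative):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# _re_copy_warning captures the indent, the object path after "diffusers.",
# and the optional "old->new [all-casing]" replacement pattern, which
# is_copy_consistent below applies to the original code before diffing.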
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowercase__ =None
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowercase__ ={
'google/fnet-base': 5_12,
'google/fnet-large': 5_12,
}
lowercase__ ='▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
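
# Shape sketch (assumption: the ids 5, 6, 7 are illustrative): for
# token_ids_0=[5, 6] and token_ids_1=[7], build_inputs_with_special_tokens
# returns [CLS] 5 6 [SEP] 7 [SEP], and create_token_type_ids_from_sequences
# returns the matching segment ids [0, 0, 0, 0, 1, 1].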
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
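# Worked example of the mapping built above: with i = 0, j = 0 the HF prefix
# "down_blocks.0.resnets.0." pairs with the SD prefix "input_blocks.1.0.", so
# the converter below turns "down_blocks.0.resnets.0.conv1.weight" into
# "input_blocks.1.0.in_layers.2.weight" (the resnet map also renames conv1).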
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
_snake_case = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
_snake_case = v.replace(snake_case__ , snake_case__ )
_snake_case = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
_snake_case = v.replace(snake_case__ , snake_case__ )
_snake_case = v
_snake_case = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
__magic_name__ : List[Any] = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
__magic_name__ : str = F'encoder.down_blocks.{i}.resnets.{j}.'
__magic_name__ : int = F'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
__magic_name__ : List[Any] = F'down_blocks.{i}.downsamplers.0.'
__magic_name__ : Tuple = F'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
__magic_name__ : List[str] = F'up_blocks.{i}.upsamplers.0.'
__magic_name__ : Union[str, Any] = F'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
__magic_name__ : Dict = F'decoder.up_blocks.{i}.resnets.{j}.'
__magic_name__ : Optional[Any] = F'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
__magic_name__ : Optional[int] = F'mid_block.resnets.{i}.'
__magic_name__ : Dict = F'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
__magic_name__ : Dict = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
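# The HF VAE attention projections are nn.Linear weights of shape [out, in],
# while the original SD checkpoint stores them as 1x1 convolutions of shape
# [out, in, 1, 1]; the two singleton dims appended above restore that layout.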
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
_snake_case = v.replace(snake_case__ , snake_case__ )
_snake_case = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
_snake_case = v.replace(snake_case__ , snake_case__ )
_snake_case = v
_snake_case = {v: vae_state_dict[k] for k, v in mapping.items()}
_snake_case = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
_snake_case = reshape_weight_for_sd(snake_case__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
__magic_name__ : int = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
__magic_name__ : Tuple = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
__magic_name__ : Union[str, Any] = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
__magic_name__ : int = {"""q""": 0, """k""": 1, """v""": 2}
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = {}
_snake_case = {}
_snake_case = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
_snake_case = k[: -len(".q_proj.weight" )]
_snake_case = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
_snake_case = [None, None, None]
_snake_case = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
_snake_case = k[: -len(".q_proj.bias" )]
_snake_case = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
_snake_case = [None, None, None]
_snake_case = v
continue
_snake_case = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE__ : protected[re.escape(m.group(0 ) )] , snake_case__ )
_snake_case = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
_snake_case = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE__ : protected[re.escape(m.group(0 ) )] , snake_case__ )
_snake_case = torch.cat(snake_case__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
_snake_case = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE__ : protected[re.escape(m.group(0 ) )] , snake_case__ )
_snake_case = torch.cat(snake_case__ )
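# HF CLIP keeps separate q/k/v projections, whereas the original SD text
# encoder uses one fused in_proj tensor per layer; the torch.cat calls above
# stitch the captured pieces back together in the q, k, v order fixed by the
# {"q": 0, "k": 1, "v": 2} table defined earlier.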
return new_state_dict
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
__magic_name__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
__magic_name__ : int = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__magic_name__ : Dict = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
__magic_name__ : str = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
__magic_name__ : Optional[int] = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load each state dict from safetensors if the file exists; otherwise fall back to the PyTorch .bin file
if osp.exists(unet_path):
__magic_name__ : Tuple = load_file(unet_path, device="""cpu""")
else:
__magic_name__ : Optional[Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
__magic_name__ : Optional[Any] = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
__magic_name__ : int = load_file(vae_path, device="""cpu""")
else:
__magic_name__ : int = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
__magic_name__ : str = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
__magic_name__ : Dict = load_file(text_enc_path, device="""cpu""")
else:
__magic_name__ : int = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
__magic_name__ : Union[str, Any] = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
__magic_name__ : str = convert_unet_state_dict(unet_state_dict)
__magic_name__ : int = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__magic_name__ : Optional[Any] = convert_vae_state_dict(vae_state_dict)
__magic_name__ : Any = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__magic_name__ : Tuple = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__magic_name__ : Tuple = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
__magic_name__ : Optional[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
__magic_name__ : Any = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
__magic_name__ : List[Any] = convert_text_enc_state_dict(text_enc_dict)
__magic_name__ : List[str] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__magic_name__ : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__magic_name__ : str = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__magic_name__ : int = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 672 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """perceiver"""
def __init__(self , __a=256 , __a=1280 , __a=768 , __a=1 , __a=26 , __a=8 , __a=8 , __a=None , __a=None , __a="kv" , __a=1 , __a=1 , __a="gelu" , __a=0.1 , __a=0.02 , __a=1E-1_2 , __a=True , __a=262 , __a=2048 , __a=56 , __a=[368, 496] , __a=16 , __a=1920 , __a=16 , __a=[1, 16, 224, 224] , **__a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**__a )
UpperCAmelCase__ = num_latents
UpperCAmelCase__ = d_latents
UpperCAmelCase__ = d_model
UpperCAmelCase__ = num_blocks
UpperCAmelCase__ = num_self_attends_per_block
UpperCAmelCase__ = num_self_attention_heads
UpperCAmelCase__ = num_cross_attention_heads
UpperCAmelCase__ = qk_channels
UpperCAmelCase__ = v_channels
UpperCAmelCase__ = cross_attention_shape_for_attention
UpperCAmelCase__ = self_attention_widening_factor
UpperCAmelCase__ = cross_attention_widening_factor
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = use_query_residual
# masked language modeling attributes
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
# image classification attributes
UpperCAmelCase__ = image_size
# flow attributes
UpperCAmelCase__ = train_size
# multimodal autoencoding attributes
UpperCAmelCase__ = num_frames
UpperCAmelCase__ = audio_samples_per_frame
UpperCAmelCase__ = samples_per_patch
UpperCAmelCase__ = output_shape
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCamelCase__ (self ) -> float:
"""simple docstring"""
return 1E-4
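# This value is the absolute tolerance used when validating the exported ONNX
# graph against the reference PyTorch outputs.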
def UpperCamelCase__ (self , __a , __a = -1 , __a = -1 , __a = -1 , __a = False , __a = None , __a = 3 , __a = 40 , __a = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
if isinstance(__a , __a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ = preprocessor.num_special_tokens_to_add(__a )
UpperCAmelCase__ = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ = [' '.join(['a'] ) * seq_length] * batch_size
UpperCAmelCase__ = dict(preprocessor(__a , return_tensors=__a ) )
UpperCAmelCase__ = inputs.pop('input_ids' )
return inputs
elif isinstance(__a , __a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ = compute_effective_axis_dimension(__a , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase__ = self._generate_dummy_images(__a , __a , __a , __a )
UpperCAmelCase__ = dict(preprocessor(images=__a , return_tensors=__a ) )
UpperCAmelCase__ = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 146 | 0 |
def _lowercase ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
UpperCamelCase = """"""
while len(SCREAMING_SNAKE_CASE_ ) % 3 != 0:
UpperCamelCase = """0""" + bin_string
UpperCamelCase = [
bin_string[index : index + 3]
for index in range(len(SCREAMING_SNAKE_CASE_ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
UpperCamelCase = 0
for index, val in enumerate(SCREAMING_SNAKE_CASE_ ):
oct_val += int(2 ** (2 - index) * int(SCREAMING_SNAKE_CASE_ ) )
oct_string += str(SCREAMING_SNAKE_CASE_ )
return oct_string
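# Worked example: "1010" is left-padded to "001010"; the groups "001" and
# "010" evaluate to the octal digits 1 and 2, so the function returns "12"
# (0b1010 == 0o12 == 10 in decimal).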
if __name__ == "__main__":
from doctest import testmod
testmod()
| 181 |
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and number_of_steps > 0
), f'number_of_steps needs to be a positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
UpperCamelCase , UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCamelCase , UpperCamelCase = current + previous, current
return current
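# Worked example: for number_of_steps = 3 the loop yields (2, 1) and then
# (3, 2), returning 3 -- the distinct climbs being 1+1+1, 1+2 and 2+1.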
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 | 1 |
def _A( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> bool:
'''simple docstring'''
return numa ^ numa < 0
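# In two's complement, the XOR of two integers has its sign bit set exactly
# when the operands' sign bits differ: (-5) ^ 3 is negative, while
# (-5) ^ (-3) and 5 ^ 3 are not, so the comparison detects opposite signs.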
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
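# _LazyModule defers the actual submodule imports until an attribute is first
# accessed, so importing the package stays cheap; the try/except blocks above
# simply leave out the torch-backed classes when torch is not installed.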
| 332 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __snake_case ( _lowercase):
snake_case__ : Optional[int] = ["image_processor", "tokenizer"]
snake_case__ : Any = "ChineseCLIPImageProcessor"
snake_case__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Union[str, Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
_lowerCamelCase : Dict = kwargs.pop('''feature_extractor''' )
_lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.image_processor
def __call__( self : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_lowerCamelCase : Optional[int] = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
_lowerCamelCase : Optional[Any] = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
_lowerCamelCase : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
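# In short: a text-only call returns the tokenizer encoding, an image-only
# call returns the image features wrapped in a BatchEncoding, and a combined
# call returns the text encoding with pixel_values attached.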
def SCREAMING_SNAKE_CASE ( self : Optional[int] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = self.tokenizer.model_input_names
_lowerCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
| 598 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Tuple = GPTaTokenizer
snake_case__ : str = GPTaTokenizerFast
snake_case__ : Union[str, Any] = True
snake_case__ : Dict = {"add_prefix_space": True}
snake_case__ : Any = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_lowerCamelCase : Union[str, Any] = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCamelCase : List[Any] = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
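# The fixture above defines a minimal byte-level BPE: "\u0120" is the
# byte-level stand-in for a leading space, and the merges build " low" up to
# the single symbol "\u0120low", so "lower" tokenizes to "\u0120low" + "er"
# once a prefix space is added.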
def SCREAMING_SNAKE_CASE ( self : Any , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : Any = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Any = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : Any = '''lower newer'''
_lowerCamelCase : Dict = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_lowerCamelCase : Optional[int] = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = tokens + [tokenizer.unk_token]
_lowerCamelCase : List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : int = '''lower newer'''
# Testing tokenization
_lowerCamelCase : Optional[int] = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
_lowerCamelCase : List[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
_lowerCamelCase : str = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
_lowerCamelCase : Dict = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing the unknown token
_lowerCamelCase : int = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : List[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : str ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any]=1_5 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
_lowerCamelCase : Tuple = '''This is a simple input'''
_lowerCamelCase : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
_lowerCamelCase : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
_lowerCamelCase : Tuple = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_lowerCamelCase : List[str] = '''This is a simple input'''
_lowerCamelCase : int = ['''This is a simple input looooooooong''', '''This is a simple input''']
_lowerCamelCase : int = ('''This is a simple input''', '''This is a pair''')
_lowerCamelCase : int = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
_lowerCamelCase : Tuple = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
_lowerCamelCase : Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
_lowerCamelCase : List[Any] = tokenizer(*__lowerCAmelCase , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
_lowerCamelCase : Optional[Any] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = '''$$$'''
_lowerCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
_lowerCamelCase : Any = '''This is a simple input'''
_lowerCamelCase : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
_lowerCamelCase : Dict = tokenizer.bos_token_id
_lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase )
_lowerCamelCase : Dict = tokenizer(__lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : Any = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = [self.get_tokenizer(do_lower_case=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : str = '''Encode this.'''
_lowerCamelCase : Optional[Any] = '''This one too please.'''
_lowerCamelCase : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
encoded_sequence += tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
_lowerCamelCase : List[Any] = tokenizer.encode_plus(
__lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , )
_lowerCamelCase : str = encoded_sequence_dict['''input_ids''']
_lowerCamelCase : List[Any] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
_lowerCamelCase : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowerCAmelCase )
]
_lowerCamelCase : List[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_tokenizers
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowerCAmelCase )
_lowerCamelCase : Tuple = '''A photo of a cat'''
_lowerCamelCase : Tuple = tokenizer.encode(
__lowerCAmelCase , )
self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''test_opt''' )
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('''./test_opt''' )
_lowerCamelCase : Optional[int] = tokenizer.encode(
__lowerCAmelCase , )
self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__lowerCAmelCase )
_lowerCamelCase : Tuple = '''A photo of a cat'''
_lowerCamelCase : List[str] = tokenizer.encode(
__lowerCAmelCase , )
# Same as above
self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = '''bos'''
_lowerCamelCase : Optional[Any] = tokenizer.get_vocab()['''bos''']
_lowerCamelCase : Any = '''A photo of a cat'''
_lowerCamelCase : int = tokenizer.encode(
__lowerCAmelCase , )
# We changed the bos token
self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('''./tok''' )
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
_lowerCamelCase : Tuple = tokenizer.encode(
__lowerCAmelCase , )
self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 598 | 1 |
"""simple docstring"""
from __future__ import annotations
def a ( __UpperCAmelCase : list[float] ) -> bool:
if len(__UpperCAmelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__magic_name__: List[Any] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
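# A closed polygon requires its longest side to be strictly shorter than the
# sum of the remaining sides; e.g. [1, 1, 3] fails because 3 >= 1 + 1.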
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
return (-y * np.log(__UpperCamelCase ) - (1 - y) * np.log(1 - h )).mean()
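# Mean binary cross-entropy: J = -(1/m) * sum(y*log(h) + (1-y)*log(1-h)),
# where h is the vector of predicted probabilities and y the 0/1 labels.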
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[int] = np.dot(__UpperCamelCase , __UpperCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__UpperCamelCase ) ) )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int=7_0_0_0_0 ):
'''simple docstring'''
snake_case_ : Dict = np.zeros(x.shape[1] )
for iterations in range(__UpperCamelCase ):
snake_case_ : Any = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Optional[Any] = np.dot(x.T , h - y ) / y.size
snake_case_ : str = theta - alpha * gradient # updating the weights
snake_case_ : int = np.dot(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = sigmoid_function(__UpperCamelCase )
snake_case_ : Dict = cost_function(__UpperCamelCase , __UpperCamelCase )
if iterations % 1_0_0 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
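# The loop above is plain batch gradient descent: the gradient of the mean
# cross-entropy is X^T (h - y) / m, computed here as np.dot(x.T, h - y) / y.size.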
# In[68]:
if __name__ == "__main__":
__lowerCAmelCase : Any = datasets.load_iris()
__lowerCAmelCase : List[Any] = iris.data[:, :2]
__lowerCAmelCase : Tuple = (iris.target != 0) * 1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : List[Any] = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
return sigmoid_function(
np.dot(__UpperCamelCase , __UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((__lowerCAmelCase) , (__lowerCAmelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCAmelCase : Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 58 | 0 |
import requests
from bsa import BeautifulSoup
def lowerCamelCase ( UpperCamelCase : str = "AAPL" ) -> str:
_lowerCamelCase = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_lowerCamelCase = BeautifulSoup(requests.get(UpperCamelCase ).text , 'html.parser' )
_lowerCamelCase = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
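# Note: this scrapes the quote page via a hard-coded CSS class, which is
# brittle; if Yahoo changes its markup, find() returns None and the chained
# attribute access raises AttributeError.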
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''') | 709 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Any=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : List[Any]=False ) -> List[str]:
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : str ) -> Tuple:
for i in range(config.num_hidden_layers ):
_lowerCamelCase = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
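# timm-style ViT attention stores one fused qkv matrix of shape
# [3 * hidden_size, hidden_size]; the three hidden_size-row slices above
# become the separate query, key and value weights (biases split the same way).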
def lowerCamelCase ( UpperCamelCase : Tuple ) -> List[Any]:
_lowerCamelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def lowerCamelCase ( UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : str ) -> Any:
_lowerCamelCase = dct.pop(UpperCamelCase )
_lowerCamelCase = val
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : int ) -> Optional[int]:
_lowerCamelCase = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=UpperCamelCase )
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
if "vqa" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = 31_29
_lowerCamelCase = 'huggingface/label-files'
_lowerCamelCase = 'vqa2-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = ViltForQuestionAnswering(UpperCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = 2
_lowerCamelCase = {0: 'False', 1: 'True'}
_lowerCamelCase = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase = 3
_lowerCamelCase = ViltForImagesAndTextClassification(UpperCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = ViltForImageAndTextRetrieval(UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = ViltForMaskedLM(UpperCamelCase )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location='cpu' )['state_dict']
_lowerCamelCase = create_rename_keys(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase )
# Define processor
_lowerCamelCase = ViltImageProcessor(size=3_84 )
_lowerCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_lowerCamelCase = ViltProcessor(UpperCamelCase , UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCamelCase ).raw )
_lowerCamelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCamelCase ).raw )
_lowerCamelCase = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
_lowerCamelCase = processor(UpperCamelCase , UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = processor(UpperCamelCase , UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=UpperCamelCase ).raw )
if mlm_model:
_lowerCamelCase = 'a bunch of [MASK] laying on a [MASK].'
else:
_lowerCamelCase = 'How many cats are there?'
_lowerCamelCase = processor(UpperCamelCase , UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = model(**UpperCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase = torch.Size([1, 11, 3_05_22] )
_lowerCamelCase = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase = torch.Size([1, 31_29] )
_lowerCamelCase = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
_lowerCamelCase = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase = torch.Size([1, 2] )
_lowerCamelCase = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 234 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase_ ( __a , __a , __a ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def UpperCamelCase_ ( __a , __a , __a , __a="attention" ) -> List[str]:
a__ : Union[str, Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
a__ : List[str] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
a__ : int = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
a__ : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
a__ : int = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
a__ : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
a__ : Tuple = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
a__ : int = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
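# T5X stores the q/k/v kernels as [d_model, num_heads, head_dim] and the
# output kernel as [num_heads, head_dim, d_model]; the reshapes above flatten
# the head axes so each tensor matches a 2-D PyTorch Linear weight.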
def UpperCamelCase_ ( __a , __a , __a , __a=False ) -> Any:
if split_mlp_wi:
a__ : Optional[int] = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
a__ : str = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
a__ : Tuple = (wi_a, wi_a)
else:
a__ : Tuple = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
a__ : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def UpperCamelCase_ ( __a , __a , __a , __a ) -> Tuple:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def UpperCamelCase_ ( __a , *, __a , __a , __a = False ) -> List[str]:
a__ : str = traverse_util.flatten_dict(variables["target"] )
a__ : Dict = {'''/'''.join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
a__ : List[Any] = '''encoder/encoder/mlp/wi_0/kernel''' in old
print("Split MLP:" , SCREAMING_SNAKE_CASE__ )
a__ : Optional[int] = collections.OrderedDict()
# Shared embeddings.
a__ : Dict = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
a__ : List[str] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , "pre_attention_layer_norm" )
a__ : List[str] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , "attention" )
a__ : Dict = layer_norm
a__ : Union[str, Any] = k.T
a__ : Optional[Any] = o.T
a__ : str = q.T
a__ : List[Any] = v.T
# Block i, layer 1 (MLP).
a__ : Dict = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , "pre_mlp_layer_norm" )
a__ : Optional[Any] = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" , SCREAMING_SNAKE_CASE__ )
a__ : Union[str, Any] = layer_norm
if split_mlp_wi:
a__ : Any = wi[0].T
a__ : List[Any] = wi[1].T
else:
a__ : Any = wi.T
a__ : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a__ : Dict = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "encoder" ).T
a__ : int = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
a__ : Union[str, Any] = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , "encoder" ).T
a__ : int = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
a__ : Optional[Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "pre_self_attention_layer_norm" )
a__ : Optional[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "self_attention" )
a__ : str = layer_norm
a__ : Any = k.T
a__ : Dict = o.T
a__ : int = q.T
a__ : Dict = v.T
# Block i, layer 1 (Cross Attention).
a__ : Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "pre_cross_attention_layer_norm" )
a__ : str = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "encoder_decoder_attention" )
a__ : Dict = layer_norm
a__ : List[str] = k.T
a__ : Optional[Any] = o.T
a__ : str = q.T
a__ : int = v.T
# Block i, layer 2 (MLP).
a__ : Dict = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , "pre_mlp_layer_norm" )
a__ : Dict = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" , SCREAMING_SNAKE_CASE__ )
a__ : Tuple = layer_norm
if split_mlp_wi:
a__ : Any = wi[0].T
a__ : int = wi[1].T
else:
a__ : Union[str, Any] = wi.T
a__ : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
a__ : List[Any] = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "decoder" ).T
a__ : Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a__ : Optional[int] = old['''decoder/logits_dense/kernel'''].T
return new
def UpperCamelCase_ ( __a , __a ) -> Dict:
a__ : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
a__ : Tuple = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
a__ : Optional[Any] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
a__ : Optional[int] = state_dict['''shared.weight''']
return state_dict
def UpperCamelCase_ ( __a , __a , __a , __a , __a ) -> str:
a__ : Optional[Any] = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
a__ : Union[str, Any] = convert_tax_to_pytorch(
SCREAMING_SNAKE_CASE__ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE__ , scalable_attention=SCREAMING_SNAKE_CASE__ )
a__ : Optional[int] = make_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
def UpperCamelCase_ ( __a , __a , __a , __a = False , __a = False , ) -> Any:
a__ : int = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
a__ : Dict = UMTaEncoderModel(SCREAMING_SNAKE_CASE__ )
else:
a__ : str = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE__ )
print("Done" )
if __name__ == "__main__":
UpperCamelCase : str = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
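# A minimal invocation sketch for the converter above (hypothetical local paths;
# the script filename is assumed, everything else is grounded in the argparse flags):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention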
| 37 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
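# With the lazy structure above, importing the package is cheap: backend-specific
# classes are only resolved on first attribute access. A quick sketch (assuming a
# local torch install; the default GroupViTConfig is used purely for illustration):
#
#   from transformers import GroupViTConfig, GroupViTModel
#
#   model = GroupViTModel(GroupViTConfig())  # torch-backed class resolved lazily here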
| 658 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache,
            pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
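# The cache-consistency check exercised above generalizes to any causal decoder: run
# the full sequence without cache, run only the new token with past_key_values, and
# compare the outputs. A standalone sketch using a small public checkpoint (network
# access assumed; the atol value is an assumption):
#
#   import torch
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2").eval()
#   ids = torch.tensor([[10, 20, 30]])
#   past = model(ids[:, :-1], use_cache=True).past_key_values
#   full = model(ids).logits
#   step = model(ids[:, -1:], past_key_values=past).logits
#   assert torch.allclose(full[:, -1], step[:, -1], atol=1e-4)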
| 713 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
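# A quick interactive check of the add_prefix_space / trim_offsets interplay that the
# test above pins down (network access assumed for the pretrained files):
#
#   from transformers import RobertaTokenizerFast
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
#   enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#   print(enc.offset_mapping)  # [(0, 5), (6, 11)] with the default trim_offsets=True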
| 86 | 0 |
"""simple docstring"""
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Prints the multiplication table of a given number till the given number of terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
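# Expected output of the call above:
#
#   5 * 1 = 5
#   5 * 2 = 10
#   ...
#   5 * 10 = 50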
| 567 |
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # a subset value says 1 if that subset sum can be formed else 0
    # initially no subsets can be formed hence False/0
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
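# Worked example beyond the doctests above: for arr = [3, 34, 4, 12, 5, 2],
#
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)   # -> True  (4 + 5 = 9)
#   is_sum_subset([3, 34, 4, 12, 5, 2], 30)  # -> False (no subset sums to 30)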
| 567 | 1 |
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
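# Expected round trip printed by main():
#
#   Morse code here!
#   -- --- .-. ... . / -.-. --- -.. . / .... . .-. . -.-.--
#   MORSE CODE HERE!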
| 712 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0_0}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0_0_0}),
]
    functions_shuffled = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_0_0_0}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_0_0_0}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
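# For reference, a minimal get_duration decorator compatible with the usage above.
# The real one lives in the benchmark's local utils module; this is an assumed sketch:
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start  # seconds elapsed, recorded into `times`
#       return wrapper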
| 514 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
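# Once converted, the dump folder loads like any hub checkpoint (assuming the
# conversion above completed with the default output path):
#
#   from transformers import SwiftFormerForImageClassification
#
#   model = SwiftFormerForImageClassification.from_pretrained("./converted_outputs/")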
| 711 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
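# For intuition: with degree=4 and the default include_bias=True, fit_transform maps a
# scalar position level x to [1, x, x**2, x**3, x**4], so the linear model fits a quartic:
#
#   poly_reg.fit_transform([[2.0]])  # -> [[ 1.,  2.,  4.,  8., 16.]]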
# Visualizing the Polymonial Regression results
def viz_polymonial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003 | 566 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
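# With the names restored, a minimal local round trip (no network needed):
#
#   cfg = Data2VecVisionConfig(hidden_size=384, num_hidden_layers=6)
#   onnx_cfg = Data2VecVisionOnnxConfig(cfg)
#   assert onnx_cfg.atol_for_validation == 1e-4
#   assert "pixel_values" in onnx_cfg.inputs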
| 48 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # Fundamental transformation applied to every pixel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 228 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
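# Importing from the old location still works but now emits the warning configured
# above, e.g.:
#
#   from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import (
#       StableDiffusionControlNetPipeline,  # FutureWarning: importing from here is deprecated
#   )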
| 708 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
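# The invariant these tests pin down: a checkpoint listing is safetensors-compatible
# only if every .bin weight (for the requested variant) has a .safetensors twin.
#
#   from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
#
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])  # -> False
#   is_safetensors_compatible(
#       ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
#   )  # -> True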
| 31 | 0 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
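# The expected CSV layout (hypothetical values), matching the columns read in Plot.__init__:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.042
#   bert-base-uncased,8,512,0.158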
| 133 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 133 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
a = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE=200 ):
_UpperCAmelCase = vocab
_UpperCAmelCase = unk_token
_UpperCAmelCase = max_input_chars_per_word
    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
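# Longest-match-first behavior on a toy vocab:
#
#   wp = WordpieceTokenizer({"abc": 0, "ab": 1, "d": 2})
#   wp.tokenize("abcd")  # -> ["abc", "d"]
#   wp.tokenize("abxd")  # -> ["ab", "<unk>", "d"]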
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<d>" , _SCREAMING_SNAKE_CASE="</d>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="</n>" , _SCREAMING_SNAKE_CASE="</_>" , _SCREAMING_SNAKE_CASE="left" , **_SCREAMING_SNAKE_CASE , ):
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=_SCREAMING_SNAKE_CASE , eod_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , line_token=_SCREAMING_SNAKE_CASE , space_token=_SCREAMING_SNAKE_CASE , padding_side=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = bod_token
_UpperCAmelCase = eod_token
_UpperCAmelCase = load_vocab(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.encoder[space_token]
_UpperCAmelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_UpperCAmelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = [i for i in token_ids if i >= 0]
_UpperCAmelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return token in self.encoder
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return "".join(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_UpperCAmelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
_UpperCAmelCase = 0
if " " in self.encoder:
_UpperCAmelCase = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_UpperCAmelCase = self.encoder["""\n"""]
del self.encoder["\n"]
_UpperCAmelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
_UpperCAmelCase = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE ))
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) | 701 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> int:
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(snake_case , features=snake_case , cache_dir=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> int:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(snake_case , features=snake_case , cache_dir=snake_case ).read()
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Optional[int]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_UpperCAmelCase = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
_UpperCAmelCase = features.copy()
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = JsonDatasetReader(snake_case , features=snake_case , cache_dir=snake_case ).read()
assert isinstance(snake_case , snake_case )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case , split=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Any:
if issubclass(snake_case , snake_case ):
_UpperCAmelCase = jsonl_path
elif issubclass(snake_case , snake_case ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_json_dataset(snake_case , snake_case )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=("train",) ) -> str:
assert isinstance(snake_case , snake_case )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Tuple:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , features=snake_case , cache_dir=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Optional[Any]:
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = """train"""
_UpperCAmelCase = {"""train""": jsonl_path, """test""": jsonl_path}
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_json_datasetdict(snake_case , snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> List[Any]:
return json.load(snake_case )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> List[Any]:
return [json.loads(snake_case ) for line in buffer]
class _A :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_SCREAMING_SNAKE_CASE , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert isinstance(exported_content[0] , _SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , lines=_SCREAMING_SNAKE_CASE , orient=_SCREAMING_SNAKE_CASE , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(_SCREAMING_SNAKE_CASE )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_SCREAMING_SNAKE_CASE , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_SCREAMING_SNAKE_CASE ) == 10
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
with pytest.raises(_SCREAMING_SNAKE_CASE ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = tmp_path_factory.mktemp("""data""" ) / F"test.json.{extension}"
_UpperCAmelCase = str(shared_datadir / F"test_file.json.{extension}" )
JsonDatasetWriter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compression=_SCREAMING_SNAKE_CASE ).write()
with fsspec.open(_SCREAMING_SNAKE_CASE , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase = f.read()
with fsspec.open(_SCREAMING_SNAKE_CASE , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase = f.read()
assert exported_content == original_content | 175 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class lowercase_ (a__ ):
snake_case =None
snake_case =None
snake_case =None
snake_case =None
class lowercase_ (a__ ):
def __init__( self , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_=512 , lowercase_="cls" , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Any:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
a__ =project_dim
a__ =pooler_fn
a__ =learn_encoder
a__ =use_attention_mask
class lowercase_ (a__ ):
snake_case =[r'''pooler''', r'''logit_scale''']
snake_case =[r'''position_ids''', r'''predictions.decoder.bias''']
snake_case ='''roberta'''
snake_case =RobertaSeriesConfig
def __init__( self , lowercase_) -> Any:
super().__init__(SCREAMING_SNAKE_CASE__)
a__ =XLMRobertaModel(SCREAMING_SNAKE_CASE__)
a__ =nn.Linear(config.hidden_size , config.project_dim)
a__ =getattr(SCREAMING_SNAKE_CASE__ , 'has_pre_transformation' , SCREAMING_SNAKE_CASE__)
if self.has_pre_transformation:
a__ =nn.Linear(config.hidden_size , config.project_dim)
a__ =nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps)
self.post_init()
def __UpperCamelCase ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Optional[int]:
a__ =return_dict if return_dict is not None else self.config.use_return_dict
a__ =self.base_model(
input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , inputs_embeds=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , encoder_attention_mask=SCREAMING_SNAKE_CASE__ , output_attentions=SCREAMING_SNAKE_CASE__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE__ , )
if self.has_pre_transformation:
a__ =outputs["""hidden_states"""][-2]
a__ =self.pre_LN(SCREAMING_SNAKE_CASE__)
a__ =self.transformation_pre(SCREAMING_SNAKE_CASE__)
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
a__ =self.transformation(outputs.last_hidden_state)
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 20 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ : Tuple = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Optional[Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase__ : Dict = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
SCREAMING_SNAKE_CASE__ : Tuple = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' ,_snake_case ,)
is not None
):
SCREAMING_SNAKE_CASE__ : Dict = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
SCREAMING_SNAKE_CASE__ : List[str] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
SCREAMING_SNAKE_CASE__ : List[str] = True
if not attribute_used:
SCREAMING_SNAKE_CASE__ : List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
SCREAMING_SNAKE_CASE__ : Tuple = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
SCREAMING_SNAKE_CASE__ : int = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
elif attribute.endswith("""_token_id""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = True
# configuration class specific cases
if not case_allowed:
SCREAMING_SNAKE_CASE__ : Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] )
SCREAMING_SNAKE_CASE__ : int = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Any = dict(inspect.signature(config_class.__init__ ).parameters )
SCREAMING_SNAKE_CASE__ : Optional[int] = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
SCREAMING_SNAKE_CASE__ : List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
SCREAMING_SNAKE_CASE__ : List[Any] = {}
if len(config_class.attribute_map ) > 0:
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
SCREAMING_SNAKE_CASE__ : int = inspect.getsourcefile(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = os.path.dirname(_snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
SCREAMING_SNAKE_CASE__ : Optional[Any] = [os.path.join(_snake_case ,_snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith("""modeling_""" )]
# Get the source code strings
SCREAMING_SNAKE_CASE__ : Any = []
for path in modeling_paths:
if os.path.isfile(_snake_case ):
with open(_snake_case ) as fp:
modeling_sources.append(fp.read() )
SCREAMING_SNAKE_CASE__ : List[str] = []
for config_param, default_value in zip(_snake_case ,_snake_case ):
# `attributes` here is all the variant names for `config_param`
SCREAMING_SNAKE_CASE__ : List[Any] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
unused_attributes.append(attributes[0] )
return sorted(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) ,lambda _snake_case : inspect.isclass(_snake_case )
and issubclass(_snake_case ,_snake_case )
and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) ,)
]
for config_class in config_classes_in_module:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = check_config_attributes_being_used(_snake_case )
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : str = unused_attributes
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(_snake_case )
if __name__ == "__main__":
check_config_attributes()
| 223 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: str = """efficientnet"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 600 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 2560 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : Tuple , ):
super().__init__(**lowerCamelCase_ )
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =width_coefficient
_lowerCAmelCase =depth_coefficient
_lowerCAmelCase =depth_divisor
_lowerCAmelCase =kernel_sizes
_lowerCAmelCase =in_channels
_lowerCAmelCase =out_channels
_lowerCAmelCase =depthwise_padding
_lowerCAmelCase =strides
_lowerCAmelCase =num_block_repeats
_lowerCAmelCase =expand_ratios
_lowerCAmelCase =squeeze_expansion_ratio
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dim
_lowerCAmelCase =pooling_type
_lowerCAmelCase =initializer_range
_lowerCAmelCase =batch_norm_eps
_lowerCAmelCase =batch_norm_momentum
_lowerCAmelCase =dropout_rate
_lowerCAmelCase =drop_connect_rate
_lowerCAmelCase =sum(lowerCamelCase_ ) * 4
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: Optional[int] = version.parse("""1.11""" )
@property
def lowerCAmelCase__ ( self : List[str] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self : List[str] ):
return 1e-5
| 149 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowercase__ : Any , lowercase__ : List[str] , lowercase__ : List[str] ):
'''simple docstring'''
_lowerCAmelCase =BertConfig.from_json_file(lowercase__ )
print(f"Building PyTorch model from configuration: {config}" )
_lowerCAmelCase =BertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 149 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
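
# A minimal sketch of how the "*" wildcard in MAPPING is resolved by the loading code
# below: the layer index is recovered from the fairseq key and substituted into the HF
# key. The fairseq name used here is a hypothetical example, not read from a checkpoint.
def _demo_wildcard_expansion():
    fairseq_name = "encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", MAPPING["self_attn.k_proj"]
    layer_index = fairseq_name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.attention.k_proj"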
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk the attribute path "a.b.c" down to the target module/parameter
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
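
# Sketch of how fairseq conv-extractor parameter names are parsed above: the layout is
# "...conv_layers.<layer_id>.<type_id>.<param>", where type_id 0 is the conv itself and
# type_id 2 is a layer norm. The name used here is a hypothetical example.
def _demo_conv_name_parsing():
    full_name = "feature_extractor.conv_layers.0.2.weight"
    items = full_name.split("conv_layers.")[-1].split(".")  # ["0", "2", "weight"]
    return int(items[0]), int(items[1])  # layer_id=0, type_id=2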
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
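
# Example invocation of this script (the paths below are placeholders, not real files):
#
#   python <this_script>.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted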
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        # e.g. with image_size=10 and patch_size=2 there are (10 // 2) ** 2 = 25 patches per frame,
        # so num_frames=2 yields 2 * 25 + 1 = 51 tokens
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
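
# A minimal inference sketch mirroring the slow integration test above. The checkpoint
# name and normalization values are taken from the test itself; the rest follows the
# standard transformers API and is only a sketch, not part of the test suite.
def _demo_timesformer_inference():
    model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    inputs = image_processor(prepare_video()[:8], return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]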
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
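
# A couple of quick sanity checks of the recurrence (executed on import):
# "c*" matches zero characters via dp[i][j - 2], "a*" absorbs repeated 'a's via
# dp[i - 1][j], and plain characters or '.' fall through to dp[i - 1][j - 1].
assert match_pattern("aab", "c*a*b")
assert not match_pattern("aab", "c*a")
assert match_pattern("aaa", "a.a")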
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__UpperCAmelCase : Tuple = "aab"
__UpperCAmelCase : Any = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
def snake_case_ ( self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self):
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs)
return model
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.dummy_unet
__SCREAMING_SNAKE_CASE = self.dummy_movq
__SCREAMING_SNAKE_CASE = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__SCREAMING_SNAKE_CASE = DDIMScheduler(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowerCAmelCase__)
# create init_image
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1)[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("""RGB""").resize((2_5_6, 2_5_6))
if str(lowerCAmelCase__).startswith("""mps"""):
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """cpu"""
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(lowerCAmelCase__) , return_dict=lowerCAmelCase__ , )[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""")
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
__SCREAMING_SNAKE_CASE = """A red cartoon frog, 4k"""
__SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa)
pipe_prior.to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa)
__SCREAMING_SNAKE_CASE = pipeline.to(lowerCAmelCase__)
pipeline.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""").manual_seed(0)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__SCREAMING_SNAKE_CASE = pipeline(
image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
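
# A minimal end-to-end usage sketch of the img2img pipeline exercised by the
# tests above, using the public checkpoints named in the slow test. Class names
# follow the released diffusers API (KandinskyV22*); this is a sketch, not part
# of the test suite.
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
image_embeds, negative_image_embeds = pipe_prior("A red cartoon frog, 4k").to_tuple()
image = pipe(
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,
).images[0]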
| 155 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 155 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[Any] = self.dummy_uncond_unet
UpperCamelCase_ : Union[str, Any] = DDIMScheduler()
UpperCamelCase_ : List[str] = self.dummy_vq_model
UpperCamelCase_ : Union[str, Any] = LDMPipeline(unet=__lowerCAmelCase , vqvae=__lowerCAmelCase , scheduler=__lowerCAmelCase )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCamelCase_ : str = torch.manual_seed(0 )
UpperCamelCase_ : Tuple = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_ : Dict = torch.manual_seed(0 )
UpperCamelCase_ : Optional[int] = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__lowerCAmelCase )[0]
UpperCamelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_ : List[Any] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
UpperCamelCase_ : Optional[int] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCamelCase_ : List[str] = torch.manual_seed(0 )
UpperCamelCase_ : Union[str, Any] = ldm(generator=__lowerCAmelCase , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCamelCase_ : Tuple = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
UpperCamelCase_ : Optional[int] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
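
# Minimal usage sketch for the unconditional latent-diffusion pipeline tested
# above, assuming the public "CompVis/ldm-celebahq-256" checkpoint; a sketch,
# not part of the test suite.
import torch
from diffusers import LDMPipeline

ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = ldm(generator=torch.manual_seed(0), num_inference_steps=50).images[0]
image.save("ldm_generated.png")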
| 708 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A :
"""simple docstring"""
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=2 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=10 , __lowerCAmelCase=3 , __lowerCAmelCase=32 * 4 , __lowerCAmelCase=32 * 6 , __lowerCAmelCase=4 , __lowerCAmelCase=32 , ):
UpperCamelCase_ : List[str] = parent
UpperCamelCase_ : List[Any] = batch_size
UpperCamelCase_ : int = is_training
UpperCamelCase_ : Union[str, Any] = use_auxiliary_loss
UpperCamelCase_ : Optional[Any] = num_queries
UpperCamelCase_ : List[str] = num_channels
UpperCamelCase_ : Optional[int] = min_size
UpperCamelCase_ : Union[str, Any] = max_size
UpperCamelCase_ : Optional[int] = num_labels
UpperCamelCase_ : List[Any] = mask_feature_size
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
UpperCamelCase_ : Dict = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
UpperCamelCase_ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
UpperCamelCase_ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCAmelCase ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _UpperCAmelCase ( self ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase_ : int = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : List[str] = output.encoder_hidden_states
UpperCamelCase_ : Any = output.pixel_decoder_hidden_states
UpperCamelCase_ : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
with torch.no_grad():
UpperCamelCase_ : Tuple = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase_ : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase_ : Optional[int] = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ : List[Any] = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
UpperCamelCase_ : List[str] = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
UpperCamelCase_ : Dict = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ):
"""simple docstring"""
__a : Tuple = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__a : List[str] = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__a : List[Any] = False
__a : int = False
__a : List[str] = False
__a : Any = False
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = MaskFormerModelTester(self )
UpperCamelCase_ : Optional[int] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def _UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
UpperCamelCase_ , UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def _UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _UpperCAmelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
UpperCamelCase_ , UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : List[Any] = model_class(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def _UpperCAmelCase ( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase_ : List[str] = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = (self.model_tester.min_size,) * 2
UpperCamelCase_ : Optional[int] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
}
UpperCamelCase_ : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
UpperCamelCase_ : List[str] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ):
UpperCamelCase_ , UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ , UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Optional[int] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
UpperCamelCase_ : Optional[int] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase_ : int = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ : Dict = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase_ : Union[str, Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ):
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase_ : List[Any] = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ : Optional[int] = True
UpperCamelCase_ : List[str] = True
UpperCamelCase_ : Any = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase_ : Dict = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
UpperCamelCase_ : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCamelCase_ : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase =1E-4
def snake_case ( ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = self.default_image_processor
UpperCamelCase_ : Optional[int] = prepare_img()
UpperCamelCase_ : Optional[int] = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
UpperCamelCase_ : Tuple = model(**__lowerCAmelCase )
UpperCamelCase_ : str = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
UpperCamelCase_ : Any = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
UpperCamelCase_ : List[Any] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase_ : int = self.default_image_processor
UpperCamelCase_ : Tuple = prepare_img()
UpperCamelCase_ : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase_ : List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
UpperCamelCase_ : List[str] = model(**__lowerCAmelCase )
# masks_queries_logits
UpperCamelCase_ : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase_ : Union[str, Any] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
UpperCamelCase_ : Optional[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
UpperCamelCase_ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ : Optional[Any] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase_ : Optional[int] = self.default_image_processor
UpperCamelCase_ : Optional[Any] = prepare_img()
UpperCamelCase_ : str = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
UpperCamelCase_ : List[Any] = model(**__lowerCAmelCase )
# masks_queries_logits
UpperCamelCase_ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase_ : List[str] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
UpperCamelCase_ : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
UpperCamelCase_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ : Any = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase_ : Optional[int] = self.default_image_processor
UpperCamelCase_ : Tuple = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
UpperCamelCase_ : List[Any] = inputs["""pixel_values"""].to(__lowerCAmelCase )
UpperCamelCase_ : List[Any] = [el.to(__lowerCAmelCase ) for el in inputs["""mask_labels"""]]
UpperCamelCase_ : Tuple = [el.to(__lowerCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCamelCase_ : Union[str, Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
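
# Minimal inference sketch for the checkpoint used by the integration tests
# above; post_process_panoptic_segmentation collapses the per-query logits into
# a single segmentation map. A sketch, not part of the test suite.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
result = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(result["segmentation"].shape)  # (height, width) label map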
| 543 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """An IPv4 address consists of four dot-separated octets, each from 0 to 255."""
    octets = ip_v4_address.split(".")
    # 255 is a valid octet value (e.g. the broadcast address 255.255.255.255)
    return len(octets) == 4 and all(
        octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets
    )


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f'{ip} is a {valid_or_invalid} IP v4 address.')
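
# Quick checks for the validator above.
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("192.168.256.1") is False
assert is_ip_v4_address_valid("not.an.ip.addr") is False
assert is_ip_v4_address_valid("1.2.3") is False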
| 59 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the GLUE MRPC train/eval dataloaders for the given accelerator."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
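
# The restored script above is meant to be launched through the Accelerate CLI
# rather than plain `python`, e.g. (after running `accelerate config` once):
#
#   accelerate launch nlp_example.py --mixed_precision fp16
#
# The file name is illustrative; use whatever this script is saved as.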
| 395 | 0 |
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError('''number must not be negative''')
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
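
# Caveat for the bitwise check above: it classifies 0 as a power of two, since
# 0 & -1 == 0. A variant that rejects zero explicitly (illustrative name):
def is_power_of_two_strict(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number != 0 and number & (number - 1) == 0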
| 719 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] ={
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = """sew-d"""
def __init__( self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase=2 , _lowercase=512 , _lowercase=256 , _lowercase=True , _lowercase=True , _lowercase=("p2c", "c2p") , _lowercase="layer_norm" , _lowercase="gelu_python" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1E-7 , _lowercase=1E-5 , _lowercase="group" , _lowercase="gelu" , _lowercase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowercase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowercase=False , _lowercase=128 , _lowercase=16 , _lowercase=True , _lowercase=0.05 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase="mean" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=0 , _lowercase=1 , _lowercase=2 , **_lowercase , ) -> str:
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : str = feat_extract_norm
_lowerCamelCase : int = feat_extract_activation
_lowerCamelCase : Optional[int] = list(_lowercase )
_lowerCamelCase : Any = list(_lowercase )
_lowerCamelCase : Dict = list(_lowercase )
_lowerCamelCase : List[Any] = conv_bias
_lowerCamelCase : Dict = num_conv_pos_embeddings
_lowerCamelCase : Optional[int] = num_conv_pos_embedding_groups
_lowerCamelCase : Dict = len(self.conv_dim )
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Optional[int] = squeeze_factor
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : Any = position_buckets
_lowerCamelCase : str = share_att_key
_lowerCamelCase : Optional[int] = relative_attention
_lowerCamelCase : Tuple = norm_rel_ebd
_lowerCamelCase : Union[str, Any] = list(_lowercase )
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : str = hidden_dropout
_lowerCamelCase : int = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Union[str, Any] = feat_proj_dropout
_lowerCamelCase : int = final_dropout
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : Dict = feature_layer_norm_eps
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : str = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Union[str, Any] = apply_spec_augment
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[str] = mask_time_min_masks
_lowerCamelCase : Optional[int] = mask_feature_prob
_lowerCamelCase : List[str] = mask_feature_length
_lowerCamelCase : int = mask_feature_min_masks
# ctc loss
_lowerCamelCase : int = ctc_loss_reduction
_lowerCamelCase : List[Any] = ctc_zero_infinity
# sequence classification
_lowerCamelCase : Optional[int] = use_weighted_layer_sum
_lowerCamelCase : List[Any] = classifier_proj_size
@property
def a__ ( self ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
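
# A quick sketch of instantiating this configuration via the released
# transformers API, assuming the class above corresponds to SEWDConfig and the
# final property is `inputs_to_logits_ratio` (the product of the conv strides).
from transformers import SEWDConfig

config = SEWDConfig()
print(config.model_type)              # "sew-d"
print(config.inputs_to_logits_ratio)  # total downsampling of the feature encoder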
| 558 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCamelCase__ :
"""simple docstring"""
A__ : List[str] = XGLMConfig
A__ : List[Any] = {}
A__ : Optional[Any] = "gelu"
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=14 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=0.0_2 , ) -> int:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = d_model
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = ffn_dim
A__ = activation_function
A__ = activation_dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = None
A__ = 0
A__ = 2
A__ = 1
def snake_case__ ( self ) -> List[Any]:
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def snake_case__ ( self ) -> int:
A__ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = self.get_config()
A__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case__ ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE__ , )
def snake_case__ ( self ) -> List[str]:
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
A__ : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
A__ : Optional[int] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
A__ : Union[str, Any] = False
A__ : int = False
A__ : Optional[int] = False
def snake_case__ ( self ) -> Optional[int]:
A__ = TFXGLMModelTester(self )
A__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=37 )
def snake_case__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@slow
def snake_case__ ( self ) -> Tuple:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def snake_case__ ( self ) -> List[Any]:
super().test_resize_token_embeddings()
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self , SCREAMING_SNAKE_CASE__=True ) -> List[str]:
A__ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
A__ = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
A__ = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
A__ = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE__ )
@slow
def snake_case__ ( self ) -> Union[str, Any]:
A__ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
A__ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
A__ = tokenizer("Today is a nice day and" , return_tensors="tf" )
A__ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
A__ = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , seed=[7, 0] )
A__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
A__ = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def snake_case__ ( self ) -> int:
A__ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
A__ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
A__ = "left"
# use different length sentences to test batching
A__ = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
A__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors="tf" , padding=SCREAMING_SNAKE_CASE__ )
A__ = inputs["input_ids"]
A__ = model.generate(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
A__ = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
A__ = model.generate(input_ids=SCREAMING_SNAKE_CASE__ , max_new_tokens=12 )
A__ = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
A__ = model.generate(input_ids=SCREAMING_SNAKE_CASE__ , max_new_tokens=12 )
A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
A__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
A__ = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
A__ = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [non_padded_sentence, padded_sentence] )
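
# Minimal TF generation sketch mirroring the integration tests above, assuming
# the public "facebook/xglm-564M" checkpoint; a sketch, not part of the tests.
from transformers import TFXGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
inputs = tokenizer("Today is a nice day and", return_tensors="tf")
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=12)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))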
| 104 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A : Dict = logging.get_logger(__name__)
@add_end_docstrings(a )
class __A( a ):
def __init__( self , *_snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
super().__init__(*_snake_case , **_snake_case )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None ) -> Tuple:
'''simple docstring'''
__a = {}
__a = {}
if prompt is not None:
__a = prompt
if generate_kwargs is not None:
__a = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__a = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
__a = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
return super().__call__(_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> Optional[int]:
'''simple docstring'''
__a = load_image(_snake_case )
if prompt is not None:
if not isinstance(_snake_case , _snake_case ):
raise ValueError(
F"""Received an invalid text input, got - {type(_snake_case )} - but expected a single string. """
'''Note also that one single text can be provided for conditional image to text generation.''' )
__a = self.model.config.model_type
if model_type == "git":
__a = self.image_processor(images=_snake_case , return_tensors=self.framework )
__a = self.tokenizer(text=_snake_case , add_special_tokens=_snake_case ).input_ids
__a = [self.tokenizer.cls_token_id] + input_ids
__a = torch.tensor(_snake_case ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
__a = self.image_processor(images=_snake_case , header_text=_snake_case , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__a = self.image_processor(images=_snake_case , return_tensors=self.framework )
__a = self.tokenizer(_snake_case , return_tensors=self.framework )
model_inputs.update(_snake_case )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
__a = self.image_processor(images=_snake_case , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__a = None
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> str:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , _snake_case )
and all(x is None for x in model_inputs['''input_ids'''] )
):
__a = None
if generate_kwargs is None:
__a = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__a = model_inputs.pop(self.model.main_input_name )
__a = self.model.generate(_snake_case , **_snake_case , **_snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict:
'''simple docstring'''
__a = []
for output_ids in model_outputs:
__a = {
'''generated_text''': self.tokenizer.decode(
_snake_case , skip_special_tokens=_snake_case , )
}
records.append(_snake_case )
        return records
| 219 | 0 |
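
# Typical use of the image-to-text pipeline defined above goes through the
# high-level `pipeline` factory; the BLIP checkpoint here is just one public
# example. A usage sketch, not part of the module.
from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# [{'generated_text': '...'}]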
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
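
# Migrating off the deprecated class above is a straight rename; the checkpoint
# here is just one public example.
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")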
| 716 |
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers, i.e. the ascending sequence
    of numbers whose only prime factors are 2, 3 and 5."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('''n_element should be a positive number''')
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F'The list with nth numbers is: {hamming_numbers}')
    print("""-----------------------------------------------------""")
| 675 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]

    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    prediction = model.predict(x_test)
| 16 |
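
# A quick visual check of the LSTM fit above (assumes matplotlib is installed):
# flatten the multi-step predictions and plot them against the held-out targets.
import matplotlib.pyplot as plt

plt.plot(y_test.ravel(), label="actual")
plt.plot(prediction.ravel(), label="predicted")
plt.legend()
plt.show()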
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Byte-reverse a 32-character binary string, 8 characters at a time."""
    if len(string_32) != 32:
        raise ValueError("""Input must be of length 32""")
    little_endian = b""""""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Render a 32-bit integer as little-endian hexadecimal bytes."""
    if i < 0:
        raise ValueError("""Input must be non-negative""")
    hex_rep = format(i, """08x""")[-8:]
    little_endian_hex = b""""""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a '0'/'1' string padded to a multiple of 512 chars."""
    bit_string = b""""""
    for char in message:
        bit_string += format(char, """08b""").encode("""utf-8""")
    start_len = format(len(bit_string), """064b""").encode("""utf-8""")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-char blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("""Input must have length that's a multiple of 512""")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("""Input must be non-negative""")
    i_str = format(i, """032b""")
    new_str = """"""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("""Input must be non-negative""")
    if shift < 0:
        raise ValueError("""Shift must be non-negative""")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character hexadecimal MD5 digest of `message`."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x6745_2301
    b0 = 0xEFCD_AB89
    c0 = 0x98BA_DCFE
    d0 = 0x1032_5476

    # Per-operation left-rotation amounts, 16 per round
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
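
    # Illustrative sanity check (a sketch, not part of the original script): it assumes
    # md5_me, as restored above, returns the 32-char lowercase hex digest as bytes, and
    # compares it against the stdlib implementation.
    import hashlib

    for sample in [b"", b"The quick brown fox jumps over the lazy dog"]:
        assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")
    print("md5_me matches hashlib.md5 on the samples above")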
| 301 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_lowerCamelCase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
_KWARGS_DESCRIPTION = _lowerCamelCase  # restore the intended name for the args/examples string defined just above


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string'''),
}) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 700 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
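

# A small illustrative sketch (not part of the original test file): the spatial-size
# arithmetic the tests above repeat four times is the standard convolution output
# formula, applied once per stride-2 convolution in Levit's patch embedding.
def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    return (size + 2 * padding - kernel) // stride + 1


if __name__ == "__main__":
    size = 64
    for _ in range(4):  # four stacked stride-2 convolutions
        size = conv_output_size(size, kernel=3, stride=2, padding=1)
    print(size)  # 64 -> 32 -> 16 -> 8 -> 4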
| 613 | 0 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 3 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
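
# A minimal sketch of the deferral idea used above (illustrative only; the real
# `_LazyModule` lives in `transformers.utils` and is more elaborate): the heavy
# import only happens on first attribute access.
if __name__ == "__main__":
    import importlib

    class LazyAttr:
        def __init__(self, module_name):
            self._module_name = module_name
            self._module = None

        def __getattr__(self, name):
            if self._module is None:  # the real import happens here, on first access
                self._module = importlib.import_module(self._module_name)
            return getattr(self._module, name)

    math = LazyAttr("math")
    print(math.pi)  # triggers the actual import of `math`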
| 380 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
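

# An illustrative sketch (not part of the original tests) of the padding-aware
# position-id trick exercised above: cumulative-sum over the non-padding mask,
# shifted past `padding_idx`, so padding positions keep `padding_idx` itself.
def _position_ids_sketch(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx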
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 615 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__snake_case : Tuple = logging.get_logger(__name__)
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
__lowerCAmelCase : Optional[Any] = set()
__lowerCAmelCase : List[Any] = []
def parse_line(__snake_case ):
for line in fp:
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : Tuple = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(__snake_case ) > 0:
__lowerCAmelCase : List[Any] = "\n".join(__snake_case )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(__snake_case )
buffer.clear()
continue
else:
__lowerCAmelCase : List[str] = line.strip()
buffer.append(__snake_case )
if from_gh:
for filename in os.listdir(__snake_case ):
__lowerCAmelCase : List[str] = os.path.join(__snake_case ,__snake_case )
if not os.path.isdir(__snake_case ):
# read the file
if filename != "warnings.txt":
continue
with open(__snake_case ) as fp:
parse_line(__snake_case )
else:
try:
with zipfile.ZipFile(__snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(__snake_case ):
# read the file
if filename != "warnings.txt":
continue
with z.open(__snake_case ) as fp:
parse_line(__snake_case )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def _lowercase ( __snake_case ,__snake_case ) -> Any:
__lowerCAmelCase : Any = set()
__lowerCAmelCase : str = [os.path.join(__snake_case ,__snake_case ) for p in os.listdir(__snake_case ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(__snake_case ,__snake_case ) )
return selected_warnings
if __name__ == "__main__":
def _lowercase ( __snake_case ) -> Optional[Any]:
return values.split("," )
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
__snake_case : Tuple = parser.parse_args()
__snake_case : Any = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__snake_case : str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__snake_case : Tuple = extract_warnings(args.output_dir, args.targets)
__snake_case : List[str] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 615 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
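

# A small illustrative sketch (not part of the original file) of the id alignment
# implemented above: special tokens come from the hand-written fairseq table, and
# every ordinary sentencepiece id is shifted up by `fairseq_offset`. The piece ids
# below are made up for demonstration.
if __name__ == "__main__":
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1

    def to_id(token, piece_to_id):
        if token in fairseq_tokens_to_ids:
            return fairseq_tokens_to_ids[token]
        spm_id = piece_to_id.get(token, 0)
        return spm_id + fairseq_offset if spm_id else 3  # fall back to <unk>

    fake_spm = {",": 3, ".": 4, "▁": 5}  # made-up sentencepiece ids
    print([to_id(t, fake_spm) for t in ["<s>", ",", ".", "oov"]])  # [0, 4, 5, 3]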
| 165 |
import qiskit


def quantum_entanglement(qubits: int = 2):
    """Build a GHZ-style entangled state on `qubits` qubits and measure it."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
| 165 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets


_CITATION = '\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
UpperCamelCase = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def a ( self : List[str] ) -> List[str]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 125 |
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 125 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`.

    >>> solution(10)
    23
    """
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # multiples of 15 satisfy both conditions but are only counted once here
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 315 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
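# Worked check (illustrative, mirrors test_full_tokenizer above): token ids follow
# list order in the toy vocab, so ["low", "er</w>", "<unk>"] maps to [14, 15, 20].
_toy_vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>",
              "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>",
              "wider</w>", "<unk>"]
assert [_toy_vocab.index(t) for t in ("low", "er</w>", "<unk>")] == [14, 15, 20]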
| 315 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
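# Minimal usage sketch (added for illustration): CvtConfig's per-stage lists line up
# by index, so stage 1 uses embed_dim[1], num_heads[1], depth[1], and so on.
if __name__ == "__main__":
    config = CvtConfig()
    for stage, (dim, heads, layers) in enumerate(zip(config.embed_dim, config.num_heads, config.depth)):
        print(f"stage {stage}: dim={dim}, heads={heads}, layers={layers}")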
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Any = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
    else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
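# Minimal sketch (illustrative only; this is NOT the real _LazyModule implementation)
# of the lazy-import pattern used above: attribute access on the replaced module
# object triggers the actual submodule import on first use.
#
# import importlib, types
#
# class _SketchLazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#
#     def __getattr__(self, attr):
#         for submodule, names in self._import_structure.items():
#             if attr in names:
#                 module = importlib.import_module("." + submodule, self.__name__)
#                 return getattr(module, attr)
#         raise AttributeError(attr)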
| 662 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print in a multi-process-safe way by serializing prints on a file lock."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
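# Illustrative CPU-only fallback (a sketch added for exposition, not part of the
# original script): the same all_reduce/barrier smoke test with the "gloo" backend,
# launched from a single machine. Call run_local_smoke_test() manually to try it.
def _gloo_worker(rank, world_size):
    dist.init_process_group(
        "gloo", init_method="tcp://127.0.0.1:29500", rank=rank, world_size=world_size
    )
    dist.all_reduce(torch.ones(1), op=dist.ReduceOp.SUM)
    dist.barrier()
    dist.destroy_process_group()


def run_local_smoke_test(world_size=2):
    import torch.multiprocessing as mp

    mp.spawn(_gloo_worker, args=(world_size,), nprocs=world_size)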
| 74 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = self.get_tokenizer()
__A : int = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : List[Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__A : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__A : List[str] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__A : int = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__A : Optional[int] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = self.prepare_image_inputs()
__A : Optional[int] = image_processor(lowerCamelCase , return_tensors="np" )
__A : Dict = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[Any] = "lower newer"
__A : Union[str, Any] = processor(text=lowerCamelCase )
__A : int = tokenizer(lowerCamelCase , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Tuple = "lower newer"
__A : int = self.prepare_image_inputs()
__A : str = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Dict = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Optional[int] = processor.batch_decode(lowerCamelCase )
__A : Optional[int] = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = "lower newer"
__A : str = self.prepare_image_inputs()
__A : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
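# Hedged end-to-end sketch (added for illustration; the checkpoint id and image
# source below are placeholders, not asserted by the tests above):
#
# processor = AlignProcessor.from_pretrained("some-org/align-checkpoint")
# image = Image.open("photo.jpg")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# # inputs now holds input_ids / token_type_ids / attention_mask / pixel_values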
| 111 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase : Optional[int] =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase : Any =get_tests_dir('fixtures/vocab.json')
lowerCAmelCase : Union[str, Any] =get_tests_dir('fixtures')
class _a ( unittest.TestCase ):
_UpperCamelCase: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Optional[Any] = 0
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Tuple = WavaVecaConfig()
lowerCAmelCase : Dict = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
copyfile(lowercase_ , os.path.join(lowercase_ , """vocab.json""" ) )
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor()
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase : Tuple = WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_ , lowercase_ ) , """r""" ) as f:
lowerCAmelCase : Optional[Any] = json.load(lowercase_ )
config_dict.pop("""processor_class""" )
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f:
f.write(json.dumps(lowercase_ ) )
lowerCAmelCase : int = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Tuple = WavaVecaFeatureExtractor()
lowerCAmelCase : int = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase : Dict = WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_ , lowercase_ ) , """r""" ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
config_dict.pop("""processor_class""" )
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f:
f.write(json.dumps(lowercase_ ) )
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[str] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(lowercase_ )
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f:
f.write("""{}""" )
lowerCAmelCase : int = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowerCAmelCase : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowerCAmelCase : Union[str, Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
lowerCAmelCase : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _snake_case ( self ) -> str:
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoProcessor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : List[Any] = os.path.join(lowercase_ , """vocab.txt""" )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase : Any = CustomTokenizer(lowercase_ )
lowerCAmelCase : Tuple = CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_ )
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Dict:
class _a ( snake_case_ ):
_UpperCamelCase: Any = False
class _a ( snake_case_ ):
_UpperCamelCase: Any = False
class _a ( snake_case_ ):
_UpperCamelCase: Any = "AutoFeatureExtractor"
_UpperCamelCase: Optional[Any] = "AutoTokenizer"
_UpperCamelCase: List[Any] = False
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Dict:
lowerCAmelCase : int = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _a ( unittest.TestCase ):
_UpperCamelCase: Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _snake_case ( cls ) -> Tuple:
lowerCAmelCase : Optional[int] = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _snake_case ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Dict = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , """test-processor""" ) , push_to_hub=lowercase_ , use_auth_token=self._token )
lowerCAmelCase : List[Any] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[int] = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , """test-processor-org""" ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization="""valid_org""" , )
lowerCAmelCase : str = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCAmelCase : Union[str, Any] = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """vocab.txt""" )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase : Dict = CustomTokenizer(lowercase_ )
lowerCAmelCase : List[Any] = CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowerCAmelCase : int = Repository(lowercase_ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(lowercase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowercase_ , """tokenizer_config.json""" ) ) as f:
lowerCAmelCase : Optional[int] = json.load(lowercase_ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowerCAmelCase : int = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 693 |
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
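# Usage sketch (added for illustration; the reflectance bands are synthetic):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    bands = {name: rng.random((4, 4)) + 0.01 for name in ("red", "green", "blue", "red_edge", "nir")}
    index_calc = IndexCalculation(**bands)
    print(index_calc.calculation("NDVI"))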
| 693 | 1 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` if it is a lowercase letter."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 398 | import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A_: int = logging.get_logger(__name__)
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 398 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
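# Illustration (hedged; the values below are made up): the index written above
# follows the standard sharded-checkpoint layout consumed by `from_pretrained`:
#
# {
#   "metadata": {"total_size": 123456},
#   "weight_map": {
#     "encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
#     ...
#   }
# }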
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path) | 224 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
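# Hedged usage sketch (added for illustration; the task string is an assumption
# about the tasks OnnxConfig accepts): inspecting the dynamic axes declared above.
if __name__ == "__main__":
    onnx_config = DistilBertOnnxConfig(DistilBertConfig(), task="multiple-choice")
    print(onnx_config.inputs)  # OrderedDict including the extra "choice" axis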
| 181 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
A_ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A_ : Optional[int] = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
def _a ( self , _lowerCamelCase , _lowerCamelCase=(-1.0, 1.0) , _lowerCamelCase=False ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = output_range
if clip:
UpperCamelCase_: int = torch.clip(_lowerCamelCase , self.min_value , self.max_value )
# Scale to [0, 1].
UpperCamelCase_: List[Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _a ( self , _lowerCamelCase , _lowerCamelCase=(-1.0, 1.0) , _lowerCamelCase=False ):
UpperCamelCase_ ,UpperCamelCase_: Dict = input_range
UpperCamelCase_: List[str] = torch.clip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if clip else outputs
# Scale to [0, 1].
UpperCamelCase_: Optional[Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: str = input_tokens > 0
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.notes_encoder(
encoder_input_tokens=_lowerCamelCase , encoder_inputs_mask=_lowerCamelCase )
UpperCamelCase_ ,UpperCamelCase_: Any = self.continuous_encoder(
encoder_inputs=_lowerCamelCase , encoder_inputs_mask=_lowerCamelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: Dict = noise_time
if not torch.is_tensor(_lowerCamelCase ):
UpperCamelCase_: List[str] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_lowerCamelCase ) and len(timesteps.shape ) == 0:
UpperCamelCase_: Dict = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase_: Union[str, Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
UpperCamelCase_: Any = self.decoder(
encodings_and_masks=_lowerCamelCase , decoder_input_tokens=_lowerCamelCase , decoder_noise_time=_lowerCamelCase )
return logits
@torch.no_grad()
def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = 1_0_0 , _lowerCamelCase = True , _lowerCamelCase = "numpy" , _lowerCamelCase = None , _lowerCamelCase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCamelCase , _lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_lowerCamelCase )}.''' )
UpperCamelCase_: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
UpperCamelCase_: str = np.zeros([1, 0, self.n_dims] , np.floataa )
UpperCamelCase_: Dict = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_lowerCamelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_lowerCamelCase ):
if i == 0:
UpperCamelCase_: str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCamelCase_: Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_lowerCamelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCamelCase_: Any = ones
UpperCamelCase_: str = self.scale_features(
_lowerCamelCase , output_range=[-1.0, 1.0] , clip=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_lowerCamelCase , continuous_mask=_lowerCamelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCamelCase_: List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_lowerCamelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCamelCase_: int = self.decode(
encodings_and_masks=_lowerCamelCase , input_tokens=_lowerCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
UpperCamelCase_: Tuple = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
UpperCamelCase_: List[Any] = self.scale_to_features(_lowerCamelCase , input_range=[-1.0, 1.0] )
UpperCamelCase_: Any = mel[:1]
UpperCamelCase_: List[str] = mel.cpu().float().numpy()
UpperCamelCase_: Tuple = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCamelCase , _lowerCamelCase )
logger.info('Generated segment' , _lowerCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
UpperCamelCase_: int = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase_: int = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_lowerCamelCase ) | 57 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
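
# Example invocation (added; assumes this script is saved as
# convert_reformer_trax_checkpoint_to_pytorch.py and the paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path /path/to/model.pkl \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin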
| 99 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""")
with patch.object(UpperCamelCase__ , """argv""" , UpperCamelCase__):
snake_case__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(UpperCamelCase__ , 0.6_66)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
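        # Note (added): the third run above exercises DeeBERT's dynamic early
        # exit. With --early_exit_entropy 0.1, an intermediate classifier's
        # prediction is returned as soon as its output entropy drops below 0.1,
        # skipping the remaining transformer layers.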
| 99 | 1 |
"""Project Euler Problem 5: smallest positive number evenly divisible by all of 1..20."""


def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'{solution() = }')
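

# Cross-check (added): on Python 3.9+ the standard library can verify the result.
if __name__ == "__main__":
    import math
    from functools import reduce

    assert solution() == reduce(math.lcm, range(1, 21)) == 232792560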
| 527 |
"""
Count the ways a given integer can be expressed as the sum of the n-th powers
of unique, natural numbers. For example, 13 with power 2 has exactly one
representation: 13 = 2**2 + 3**2.
"""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
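

# Worked example (added): with needed_sum=13 and power=2 the only representation
# using unique natural numbers is 13 = 2**2 + 3**2, so exactly one solution.
if __name__ == "__main__":
    assert solve(13, 2) == 1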
| 527 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
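
# Note (added): with the lazy structure above, importing this package is cheap.
# Heavy submodules such as `modeling_electra` are only imported the first time
# one of their attributes is accessed, e.g.:
#
#   from transformers.models.electra import ElectraModel  # triggers the torch import path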
| 434 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 434 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 409 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SegFormer model."""

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
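

# Illustrative sketch (added): the ONNX export config above declares a single
# `pixel_values` input with dynamic batch and spatial axes.
if __name__ == "__main__":
    onnx_config = SegformerOnnxConfig(SegformerConfig())
    assert "pixel_values" in onnx_config.inputs
    assert onnx_config.default_onnx_opset == 12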
| 409 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load the original fairseq state dict (handles the nested "model" key)."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the fairseq weights into the Hugging Face OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
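
# Example invocation (added; assumes this script is saved as
# convert_opt_original_pytorch_checkpoint_to_pytorch.py, paths are placeholders):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --hf_config /path/to/config.json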
| 309 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def A__ ( __lowerCamelCase ):
"""simple docstring"""
return np.maximum(0, __lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
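

# Equivalence check (added): np.maximum(0, x) matches the mask formulation
# x * (x > 0) elementwise.
if __name__ == "__main__":
    x = np.array([-2.5, 0.0, 3.0])
    assert np.array_equal(relu(x), x * (x > 0))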
| 309 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_UpperCamelCase = "sshleifer/bart-tiny-random"
_UpperCamelCase = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 363 |
"""Self-training for sequence classification."""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False

    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")

            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
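

# Illustrative call (added; file names are placeholders and `finetuning.finetune`
# must be importable, as in the original example project):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="train.csv",
#       infer_file="unlabeled.csv",
#       output_dir="self_training_output",
#       eval_file="eval.csv",
#       evaluation_strategy="epoch",
#   )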
| 363 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename fairseq keys to HF names and split off the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
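
# Example invocation (added; assumes this script is saved as
# convert_musicgen_transformers.py):
#
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder /tmp/musicgen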
| 701 |
"""Script that updates the stable version and version table in docs/source/_static/js/custom.js."""
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
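# A self-contained sketch of the edit performed above, run against an in-memory stand-in
# for custom.js. The file layout below is an assumption inferred from the string markers
# the function searches for, not a copy of the real file; `_demo_update` is hypothetical.
def _demo_update(version="4.99.0"):
    demo_lines = [
        'const stableVersion = "v4.30.0"\n',
        "const versionMapping = {\n",
        '    "v4.30.0": "v4.30.0",\n',
        "}\n",
    ]
    idx = 0
    while not demo_lines[idx].startswith("const stableVersion ="):
        idx += 1
    demo_lines[idx] = f'const stableVersion = "v{version}"\n'  # point "stable" at the new release
    while not demo_lines[idx].startswith("const versionMapping = {"):
        idx += 1
    while not demo_lines[idx].startswith("}"):
        idx += 1
    demo_lines[idx - 1] += f'    "v{version}": "v{version}",\n'  # add the entry before the closing brace
    return demo_lines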
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_lowercase = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
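# A minimal sketch of the lazy-import pattern that `_LazyModule` implements: a module-level
# `__getattr__` (PEP 562) resolves names from an import map only on first access, so heavy
# backends are not imported until needed. This illustrates the idea with the stdlib `json`
# module; it is not the actual `_LazyModule` implementation.
import importlib

_demo_import_structure = {"json": ["dumps", "loads"]}
_demo_name_to_module = {name: mod for mod, names in _demo_import_structure.items() for name in names}

def __getattr__(name):
    # resolve lazily: import the owning module, then fetch the attribute from it
    if name in _demo_name_to_module:
        module = importlib.import_module(_demo_name_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")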
| 632 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] ) -> Dict:
if isinstance(UpperCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class _UpperCAmelCase :
def snake_case_ ( self , a__ , a__):
pass
def snake_case_ ( self):
pass
def snake_case_ ( self):
pass
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__):
A__ = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__)
A__ = TFVisionTextDualEncoderModel(a__)
A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__)
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__):
A__ , A__ = self.get_vision_text_model(a__ , a__)
A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__)
A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__)
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__):
A__ , A__ = self.get_vision_text_model(a__ , a__)
A__ = {'''vision_model''': vision_model, '''text_model''': text_model}
A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a__)
A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__)
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__):
A__ , A__ = self.get_vision_text_model(a__ , a__)
A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__)
A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__)
A__ = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__)
A__ = TFVisionTextDualEncoderModel.from_pretrained(a__)
A__ = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__)
A__ = after_output[0].numpy()
A__ = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(a__ , 1e-5)
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__):
A__ , A__ = self.get_vision_text_model(a__ , a__)
A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__)
A__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__)
A__ = output.vision_model_output.attentions
self.assertEqual(len(a__) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ = to_atuple(vision_model.config.image_size)
A__ = to_atuple(vision_model.config.patch_size)
A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
A__ = output.text_model_output.attentions
self.assertEqual(len(a__) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case_ ( self , a__ , a__ , a__):
A__ = np.abs((a - b)).max()
self.assertLessEqual(a__ , a__ , F"Difference between torch and flax is {diff} (>= {tol}).")
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a__)
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a__)
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a__)
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
self.check_save_load(**a__)
def snake_case_ ( self):
A__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a__)
@slow
def snake_case_ ( self):
A__ , A__ = self.get_pretrained_model_and_inputs()
A__ = model_a(**a__)
A__ = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a__)
A__ = TFVisionTextDualEncoderModel.from_pretrained(a__)
A__ = model_a(**a__)
A__ = after_outputs[0].numpy()
A__ = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(a__ , 1e-5)
@require_tf
class _UpperCAmelCase ( A__ , unittest.TestCase ):
def snake_case_ ( self):
A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''')
A__ = 1_3
A__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
A__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
A__ = random_attention_mask([batch_size, 4])
A__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case_ ( self , a__ , a__):
A__ = TFViTModel(a__ , name='''vision_model''')
A__ = TFBertModel(a__ , name='''text_model''')
return vision_model, text_model
def snake_case_ ( self):
A__ = TFViTModelTester(self)
A__ = TFBertModelTester(self)
A__ = vit_model_tester.prepare_config_and_inputs()
A__ = bert_model_tester.prepare_config_and_inputs()
A__ , A__ , A__ = vision_config_and_inputs
        A__ , A__ , A__ , A__ , A__ , A__ , A__ = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _UpperCAmelCase ( A__ , unittest.TestCase ):
def snake_case_ ( self):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''')
A__ = 1_3
A__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
A__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
A__ = random_attention_mask([batch_size, 4])
A__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__):
A__ , A__ = self.get_vision_text_model(a__ , a__)
A__ = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__)
A__ = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__)
A__ = output.vision_model_output.attentions
self.assertEqual(len(a__) , vision_config.num_hidden_layers)
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A__ = to_atuple(vision_model.config.image_size)
A__ = to_atuple(vision_model.config.patch_size)
A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A__ = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
A__ = output.text_model_output.attentions
self.assertEqual(len(a__) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case_ ( self , a__ , a__):
A__ = TFDeiTModel(a__ , name='''vision_model''')
A__ = TFRobertaModel(a__ , name='''text_model''')
return vision_model, text_model
def snake_case_ ( self):
A__ = TFDeiTModelTester(self)
A__ = TFRobertaModelTester(self)
A__ = vit_model_tester.prepare_config_and_inputs()
A__ = bert_model_tester.prepare_config_and_inputs()
A__ , A__ , A__ = vision_config_and_inputs
        A__ , A__ , A__ , A__ , A__ , A__ , A__ = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _UpperCAmelCase ( A__ , unittest.TestCase ):
def snake_case_ ( self):
A__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''')
A__ = 1_3
A__ = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
])
A__ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
A__ = random_attention_mask([batch_size, 4])
A__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def snake_case_ ( self , a__ , a__):
A__ = TFCLIPVisionModel(a__ , name='''vision_model''')
A__ = TFBertModel(a__ , name='''text_model''')
return vision_model, text_model
def snake_case_ ( self):
A__ = TFCLIPVisionModelTester(self)
A__ = TFBertModelTester(self)
A__ = clip_model_tester.prepare_config_and_inputs()
A__ = bert_model_tester.prepare_config_and_inputs()
A__ , A__ = vision_config_and_inputs
        A__ , A__ , A__ , A__ , A__ , A__ , A__ = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def snake_case_ ( self):
A__ = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=a__)
A__ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
A__ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=a__ , padding=a__ , return_tensors='''np''')
A__ = model(**a__)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A__ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1e-3))
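# A small worked example of the sequence-length arithmetic the tests above verify
# (hedged: 224x224 images with 16x16 patches are assumed, the standard ViT-base setup;
# `_demo_vit_seq_len` is a hypothetical helper).
def _demo_vit_seq_len(image_size=(224, 224), patch_size=(16, 16)):
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])  # 14 * 14 = 196
    return num_patches + 1  # + 1 for the [CLS] token -> 197

assert _demo_vit_seq_len() == 197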
| 632 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Any = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
_UpperCamelCase : Dict = {
"input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
_UpperCamelCase : Optional[int] = model(UpperCamelCase__ )["last_hidden_state"]
_UpperCamelCase : Optional[Any] = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice.
_UpperCamelCase : Optional[Any] = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 700 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_SCREAMING_SNAKE_CASE = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def __lowerCAmelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ) -> str:
_UpperCamelCase : str = state_dict.pop(__lowerCAmelCase )
_UpperCamelCase : Union[str, Any] = val
def __lowerCAmelCase ( __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
_UpperCamelCase : str = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_UpperCamelCase : Optional[Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_UpperCamelCase : str = value
else:
_UpperCamelCase : str = value
return new_state_dict
def __lowerCAmelCase ( __lowerCAmelCase : str ) -> List[str]:
_UpperCamelCase : Any = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCamelCase : Any = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_UpperCamelCase : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : Tuple = in_proj_weight[:256, :]
_UpperCamelCase : Dict = in_proj_bias[:256]
_UpperCamelCase : int = in_proj_weight[256:512, :]
_UpperCamelCase : Tuple = in_proj_bias[256:512]
_UpperCamelCase : str = in_proj_weight[-256:, :]
_UpperCamelCase : int = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCamelCase : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_UpperCamelCase : Optional[int] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[Any] = in_proj_weight[:256, :]
_UpperCamelCase : str = in_proj_bias[:256]
_UpperCamelCase : Optional[Any] = in_proj_weight[256:512, :]
_UpperCamelCase : str = in_proj_bias[256:512]
_UpperCamelCase : Tuple = in_proj_weight[-256:, :]
_UpperCamelCase : Optional[Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_UpperCamelCase : Any = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_UpperCamelCase : Tuple = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCamelCase : Optional[Any] = in_proj_weight_cross_attn[:256, :]
_UpperCamelCase : str = in_proj_bias_cross_attn[:256]
_UpperCamelCase : Dict = in_proj_weight_cross_attn[256:512, :]
_UpperCamelCase : int = in_proj_bias_cross_attn[256:512]
_UpperCamelCase : int = in_proj_weight_cross_attn[-256:, :]
_UpperCamelCase : Optional[Any] = in_proj_bias_cross_attn[-256:]
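# A standalone sketch of the q/k/v split performed above. PyTorch's MultiheadAttention
# stores the query/key/value projections as one fused (3*d, d) matrix plus a (3*d,) bias,
# and the conversion slices both into thirds; d = 256 mirrors the hidden size used above.
# `_demo_split_in_proj` is a hypothetical helper for illustration only.
def _demo_split_in_proj(d=256):
    in_proj_w = torch.randn(3 * d, d)
    in_proj_b = torch.randn(3 * d)
    q_w, k_w, v_w = in_proj_w[:d, :], in_proj_w[d : 2 * d, :], in_proj_w[-d:, :]
    q_b, k_b, v_b = in_proj_b[:d], in_proj_b[d : 2 * d], in_proj_b[-d:]
    assert q_w.shape == k_w.shape == v_w.shape == (d, d)  # three square projection matrices
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)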
def __lowerCAmelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> List[Any]:
    _UpperCamelCase , _UpperCamelCase = image.size
_UpperCamelCase : Dict = max(__lowerCAmelCase , __lowerCAmelCase )
_UpperCamelCase : Tuple = 800 if "detection" in checkpoint_url else 1000
_UpperCamelCase : Tuple = target_max_size / current_max_size
_UpperCamelCase : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
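# A numeric check of the resize rule above (hedged: example dimensions are illustrative).
# The longer side is scaled to the target (800 for detection, 1000 for structure) and the
# shorter side follows proportionally.
def _demo_resize_dims(width=1200, height=800, target_max_size=800):
    scale = target_max_size / max(width, height)  # 800 / 1200 = 2 / 3
    return int(round(scale * width)), int(round(scale * height))

assert _demo_resize_dims() == (800, 533)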
def __lowerCAmelCase ( __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = F.to_tensor(__lowerCAmelCase )
_UpperCamelCase : Dict = F.normalize(__lowerCAmelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __lowerCAmelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> Tuple:
logger.info("Converting model..." )
# load original state dict
_UpperCamelCase : Tuple = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCamelCase : List[str] = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCamelCase : List[Any] = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_UpperCamelCase : Dict = state_dict.pop(__lowerCAmelCase )
_UpperCamelCase : Any = val
# create HuggingFace model and load state dict
_UpperCamelCase : str = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_UpperCamelCase : List[str] = 15
_UpperCamelCase : Optional[Any] = 2
_UpperCamelCase : Optional[Any] = {0: "table", 1: "table rotated"}
_UpperCamelCase : Any = idalabel
_UpperCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
_UpperCamelCase : Any = 125
_UpperCamelCase : List[str] = 6
_UpperCamelCase : str = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
_UpperCamelCase : Optional[Any] = idalabel
_UpperCamelCase : str = {v: k for k, v in idalabel.items()}
_UpperCamelCase : str = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
_UpperCamelCase : Any = TableTransformerForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify our conversion
_UpperCamelCase : str = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
_UpperCamelCase : Dict = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=__lowerCAmelCase )
_UpperCamelCase : Dict = Image.open(__lowerCAmelCase ).convert("RGB" )
_UpperCamelCase : List[Any] = normalize(resize(__lowerCAmelCase , __lowerCAmelCase ) ).unsqueeze(0 )
_UpperCamelCase : str = model(__lowerCAmelCase )
if "detection" in checkpoint_url:
_UpperCamelCase : Any = (1, 15, 3)
_UpperCamelCase : Optional[Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_UpperCamelCase : Any = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_UpperCamelCase : Any = (1, 125, 7)
_UpperCamelCase : List[Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_UpperCamelCase : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
_UpperCamelCase : Any = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(__lowerCAmelCase )
image_processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 239 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = StableDiffusionPanoramaPipeline
A__ = TEXT_TO_IMAGE_PARAMS
A__ = TEXT_TO_IMAGE_BATCH_PARAMS
A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
A__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def A ( self : Any ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_SCREAMING_SNAKE_CASE =DDIMScheduler()
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_SCREAMING_SNAKE_CASE =CLIPTextModel(_a )
_SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_SCREAMING_SNAKE_CASE ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self : Tuple , _a : Tuple , _a : Union[str, Any]=0 ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline(**_a )
_SCREAMING_SNAKE_CASE =sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =sd_pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def A ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline(**_a )
_SCREAMING_SNAKE_CASE =sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE ='french fries'
_SCREAMING_SNAKE_CASE =sd_pipe(**_a , negative_prompt=_a )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline(**_a )
_SCREAMING_SNAKE_CASE =sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =sd_pipe(**_a , view_batch_size=2 )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline(**_a )
_SCREAMING_SNAKE_CASE =sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =sd_pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : str ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=_a )
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline(**_a )
_SCREAMING_SNAKE_CASE =sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a )
_SCREAMING_SNAKE_CASE =sd_pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def A ( self : Dict ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any , _a : List[Any]=0 ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def A ( self : Tuple ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-2-base'
_SCREAMING_SNAKE_CASE =DDIMScheduler.from_pretrained(_a , subfolder='scheduler' )
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE =self.get_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_SCREAMING_SNAKE_CASE =np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def A ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=_a )
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE =self.get_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_SCREAMING_SNAKE_CASE =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : int ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
def callback_fn(_a : int , _a : int , _a : torch.FloatTensor ) -> None:
_SCREAMING_SNAKE_CASE =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_SCREAMING_SNAKE_CASE =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_SCREAMING_SNAKE_CASE =latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_SCREAMING_SNAKE_CASE =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_SCREAMING_SNAKE_CASE =latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-2-base'
_SCREAMING_SNAKE_CASE =DDIMScheduler.from_pretrained(_a , subfolder='scheduler' )
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE =self.get_inputs()
pipe(**_a , callback=_a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A ( self : int ) -> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-2-base'
_SCREAMING_SNAKE_CASE =DDIMScheduler.from_pretrained(_a , subfolder='scheduler' )
_SCREAMING_SNAKE_CASE =StableDiffusionPanoramaPipeline.from_pretrained(_a , scheduler=_a , safety_checker=_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE =self.get_inputs()
_SCREAMING_SNAKE_CASE =pipe(**_a )
_SCREAMING_SNAKE_CASE =torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
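# A condensed sketch of the step-callback pattern exercised above: `callback` receives
# (step, timestep, latents) every `callback_steps` denoising steps, which is how the test
# inspects intermediate latents. Hedged: `pipe` is assumed to be any loaded panorama
# pipeline, and `_demo_step_logger` is a hypothetical helper.
def _demo_step_logger(pipe, prompt="a photo of the dolomites"):
    seen_steps = []

    def log_step(step: int, timestep: int, latents: torch.FloatTensor) -> None:
        seen_steps.append((step, tuple(latents.shape)))  # keep shapes only; latents can be large

    pipe(prompt, num_inference_steps=3, callback=log_step, callback_steps=1)
    return seen_steps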
| 405 |
'''simple docstring'''
def _lowerCAmelCase ( ) -> int:
"""simple docstring"""
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
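# An O(n) alternative sketch: with a + b + c = 1_000 and a**2 + b**2 == c**2, eliminating c
# gives b = 1_000 * (500 - a) / (1_000 - a), so a single scan over a keeping only integer b
# suffices. `_solution_linear` is a hypothetical name for this variant.
def _solution_linear() -> int:
    for a in range(1, 5_00):
        numerator = 1_000 * (5_00 - a)
        if numerator % (1_000 - a) == 0:
            b = numerator // (1_000 - a)
            return a * b * (1_000 - a - b)  # a = 200, b = 375, c = 425 -> 31_875_000
    raise ValueError("no Pythagorean triplet sums to 1000")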
if __name__ == "__main__":
print(f'''{solution() = }''')
| 405 | 1 |
# Algorithm for the pigeonhole sorting
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = min(UpperCAmelCase__ ) # min() finds the minimum value
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ ) # max() finds the maximum value
SCREAMING_SNAKE_CASE = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
SCREAMING_SNAKE_CASE = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
SCREAMING_SNAKE_CASE = 0
for count in range(UpperCAmelCase__ ):
while holes[count] > 0:
holes[count] -= 1
SCREAMING_SNAKE_CASE = count + min_val
i += 1
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(UpperCAmelCase__ )
print("Sorted order is:" , " ".join(UpperCAmelCase__ ) )
if __name__ == "__main__":
main()
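# A hedged extra check: the min_val offset above is what lets the sort handle negative
# integers, so a self-contained restatement of the algorithm is run on a mixed-sign list.
# `_demo_negative_input` is a hypothetical helper, not part of the original module.
def _demo_negative_input():
    demo = [3, -2, 0, -5, 4]
    lo, hi = min(demo), max(demo)
    holes = [0] * (hi - lo + 1)  # one pigeonhole per value in [lo, hi]
    for x in demo:
        holes[x - lo] += 1  # count occurrences, offset by the minimum
    out = [v + lo for v, c in enumerate(holes) for _ in range(c)]
    assert out == [-5, -2, 0, 3, 4]
    return out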
| 647 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __snake_case( self : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ : List[Any] = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : int = False
lowercase__ : Any = False
lowercase__ : Optional[int] = False
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __snake_case( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __snake_case( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __snake_case( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __snake_case( self : List[Any] ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
SCREAMING_SNAKE_CASE = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , "tabby, tabby cat" )
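# A worked instance of the backbone-stride arithmetic from the tester above (hedged:
# numbers mirror the tester defaults): image_size = 64 with an effective output stride
# of 32 gives a 2x2 feature map, so 4 patches plus the [CLS] token yields seq_length = 5.
def _demo_hybrid_seq_length(image_size=64, output_stride=32):
    num_patches = (image_size // output_stride) ** 2
    return num_patches + 1

assert _demo_hybrid_seq_length() == 5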
| 647 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
A_ : List[Any] =logging.get_logger(__name__)
class lowercase_ ( UpperCamelCase__):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
"""simple docstring"""
a_ = feature_size
a_ = sampling_rate
a_ = padding_value
a_ = kwargs.pop("""padding_side""" , """right""" )
a_ = kwargs.pop("""return_attention_mask""" , _UpperCAmelCase )
super().__init__(**_UpperCAmelCase )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
"""simple docstring"""
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
a_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
a_ = processed_features[self.model_input_names[0]]
a_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_UpperCAmelCase ) == 0:
if return_attention_mask:
a_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
a_ = required_input[0]
if isinstance(_UpperCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
a_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_UpperCAmelCase ):
a_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_UpperCAmelCase ):
a_ = """tf"""
elif is_torch_tensor(_UpperCAmelCase ):
a_ = """pt"""
elif isinstance(_UpperCAmelCase , (int, float, list, tuple, np.ndarray) ):
a_ = """np"""
else:
raise ValueError(
f"type of {first_element} unknown: {type(_UpperCAmelCase )}. "
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
a_ = to_numpy(_UpperCAmelCase )
else:
a_ = [to_numpy(_UpperCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
a_ = self._get_padding_strategies(padding=_UpperCAmelCase , max_length=_UpperCAmelCase )
a_ = processed_features[self.model_input_names[0]]
a_ = len(_UpperCAmelCase )
if not all(len(_UpperCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
a_ = []
for i in range(_UpperCAmelCase ):
a_ = {k: v[i] for k, v in processed_features.items()}
# truncation
a_ = self._truncate(
_UpperCAmelCase , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , truncation=_UpperCAmelCase , )
truncated_inputs.append(_UpperCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
a_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
a_ = PaddingStrategy.MAX_LENGTH
a_ = {}
for i in range(_UpperCAmelCase ):
# padding
a_ = self._pad(
truncated_inputs[i] , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
a_ = []
if value.dtype is np.dtype(np.floataa ):
a_ = value.astype(np.floataa )
batch_outputs[key].append(_UpperCAmelCase )
return BatchFeature(_UpperCAmelCase , tensor_type=_UpperCAmelCase )
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        """Pad a single example (dict of arrays) up to `max_length` along the time axis."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        """Truncate a single example (dict of arrays) to `max_length` along the time axis."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
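    # Worked example of the `pad_to_multiple_of` rounding used in `_pad` and `_truncate`:
    # with max_length=100 and pad_to_multiple_of=64, 100 % 64 != 0, so max_length becomes
    # (100 // 64 + 1) * 64 = 128, i.e. the next multiple of 64 at or above the requested length.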
    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 483 |
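To make the padding path above concrete, here is a minimal usage sketch. It assumes a Wav2Vec2-style feature extractor that inherits this padding logic; the checkpoint name and waveform lengths are illustrative only.

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")

# two raw waveforms of different lengths
speech = [np.random.randn(16000).astype(np.float32), np.random.randn(8000).astype(np.float32)]

batch = extractor(
    speech,
    sampling_rate=16000,
    padding=True,                # PaddingStrategy.LONGEST: pad to the longest example
    return_attention_mask=True,  # 1 for real frames, 0 for padding
    return_tensors="np",
)
print(batch["input_values"].shape)    # (2, 16000): the shorter example was padded
print(batch["attention_mask"].shape)  # (2, 16000)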
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 483 | 1 |
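The pickling guarantee checked above matters for training scripts that serialize the whole optimizer object. A sketch of the more common checkpointing path follows; the file name is illustrative, and it relies on the accelerate wrapper forwarding `state_dict()`/`load_state_dict()` to the underlying torch optimizer.

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accelerator = Accelerator()
model, optimizer = accelerator.prepare(model, optimizer)

# Standard checkpointing works alongside the pickle round-trip the test covers.
torch.save({"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "ckpt.pt")
state = torch.load("ckpt.pt")
optimizer.load_state_dict(state["optimizer"])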
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 389 |
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 389 | 1 |
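A standalone version of the integration check above, for quick experimentation outside unittest; it assumes flax/jax are installed and the xlm-roberta-base weights can be downloaded (the @slow-marked test itself only runs when RUN_SLOW=1 is set).

import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxXLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")

input_ids = jnp.array([tokenizer.encode("The dog is cute and lives in the garden house")])
last_hidden_state = model(input_ids)["last_hidden_state"]
print(last_hidden_state.shape)  # (1, 12, 768): batch size, sequence length, hidden size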