| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
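
# --- Usage sketch (added; not part of the original file) ---
# A minimal illustration, assuming the standard `transformers` package layout,
# of what the _LazyModule pattern above buys: submodules are only imported on
# first attribute access, so this import stays cheap even with torch installed.
from transformers.models.speech_to_text import Speech2TextConfig  # resolved lazily

config = Speech2TextConfig()  # default hyperparameters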
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
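
# --- Usage sketch (added; not part of the original file) ---
# How the pipeline above is typically driven. "laion/clap-htsat-unfused" is a
# public CLAP checkpoint and an assumption here, not something this file pins.
from transformers import pipeline

audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
output = audio_classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# output is sorted by score, e.g. [{"score": 0.99, "label": "Sound of a dog"}, ...]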
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
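
# --- Usage sketch (added; not part of the original file) ---
# Instantiating the config with no backbone falls back to the default Swin
# backbone built above, and to_dict() serializes the nested backbone config.
config = Mask2FormerConfig(num_queries=100)
assert config.backbone_config.model_type == "swin"
config_dict = config.to_dict()  # nested, JSON-serializable dict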
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
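
# --- Usage sketch (added; not part of the original file) ---
# The preprocess() pipeline above: resize (with the crop_pct trick below 384),
# rescale to [0, 1], normalize, then stack into a framework tensor.
import numpy as np

image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)  # dummy RGB image
processor = ConvNextImageProcessor(size={"shortest_edge": 224})
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)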
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
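
# --- Usage sketch (added; not part of the original file) ---
# The trie built from unique_no_split_tokens splits a raw protein string into
# per-residue tokens, so no whitespace is needed. The checkpoint name comes
# from the pretrained map above.
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
encoding = tokenizer("MKTVRQ")
# encoding["input_ids"] starts with <cls> and ends with <eos>, one id per residue in between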
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
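
# Worked example (added; not part of the original test): blanks in the path are
# percent-encoded by quote(), e.g.
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv")
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"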
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig

logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
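
# --- Usage sketch (added; not part of the original file) ---
# End-to-end classification with the checkpoint referenced in the docstrings
# above; relies on the tensorflow import at the top of this file. Pairing the
# checkpoint with AutoImageProcessor is the usual convention, assumed here.
import numpy as np
from transformers import AutoImageProcessor

dummy_image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

inputs = image_processor(dummy_image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, num_labels)
predicted_class = int(tf.math.argmax(logits, axis=-1))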
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
def gray_code(bit_count: int) -> list:
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
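
# Worked example (added; not part of the original file): for 2 bits the strings
# are ["00", "01", "11", "10"], i.e. successive codes differ in exactly one bit.
assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code(2) == [0, 1, 3, 2]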
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
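
# --- Usage sketch (added; not part of the original file) ---
# rope_scaling must be a two-field dict; anything else trips the validation above.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0
# GPTNeoXConfig(rope_scaling={"type": "exotic", "factor": 2.0})  # -> ValueError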
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
SCREAMING_SNAKE_CASE : Dict = Mapping[str, np.ndarray]
SCREAMING_SNAKE_CASE : int = Mapping[str, Any] # Is a nested dict.
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms; the atom types correspond
    # to residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
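# Hedged, self-contained sketch (added for illustration) of the fixed-column
# ATOM record assembled in `to_pdb` above; every field value here is invented.
_DEMO_ATOM_LINE = (
    f"{'ATOM':<6}{1:>5} {' CA ':<4}{'':>1}"
    f"{'ALA':>3} {'A':>1}"
    f"{1:>4}{'':>1}   "
    f"{11.104:>8.3f}{6.134:>8.3f}{-6.504:>8.3f}"
    f"{1.00:>6.2f}{0.00:>6.2f}          "
    f"{'C':>2}{'':>2}"
)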
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Compute an ideal atom mask from the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assemble a Protein from model features and model outputs."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 635 |
import numpy as np
import datasets
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
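# Hedged usage sketch (mirrors the docstring example above; not part of the
# original metric file):
#     metric = datasets.load_metric("mahalanobis")
#     metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
#     # -> {'mahalanobis': array([0.5])}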
| 635 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
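# Hedged stand-alone sketch (added for illustration) of the stage-name
# construction in __init__ above:
#     depths = [3, 4, 6, 3]
#     ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
#     # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']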
| 716 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
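# Hedged sanity check (added for illustration):
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"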
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 497 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spanish_national_id(spanish_id: str) -> bool:
    """Validate the control letter of a Spanish national ID (DNI/NIF)."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
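# Hedged usage sketch (added for illustration): 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so is_spanish_national_id("12345678-Z") is True,
# while "12345678-A" returns False.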
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 322 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
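    # Hedged sketch (added for illustration) of the pad-to-multiple arithmetic
    # above: an input that is already a multiple of `size` still gains one
    # full extra block.
    #     old = 50, size = 8 -> (50 // 8 + 1) * 8 - 50 = 6   (padded to 56)
    #     old = 56, size = 8 -> (56 // 8 + 1) * 8 - 56 = 8   (padded to 64)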
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 322 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
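# Hedged sketch (added for illustration) of the default embedding-dimension
# rule in __init__ above:
#     cardinality = [10, 300]
#     [min(50, (cat + 1) // 2) for cat in cardinality]  # -> [5, 50], capped at 50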
| 494 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in ``n`` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
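# Hedged sanity check (added for illustration) of the digit-product helper:
assert str_eval("1234") == 24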
if __name__ == "__main__":
print(F'''{solution() = }''')
| 494 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Create a list of random PIL images for testing.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 19 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
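# Hedged sanity check (added for illustration): the truncations of 3797 are
# 3797, 797, 97, 7, 379, 37 and 3.
assert sorted(list_truncated_nums(3797)) == [3, 7, 37, 97, 379, 797, 3797]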
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
    print(f"""{sum(compute_truncated_primes(11)) = }""")
| 67 | 0 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 609 |
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
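# Hedged sanity check (added for illustration): C(5, 2) == 10.
assert binomial_coefficient(5, 2) == 10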
def catalan_number(node_count: int) -> int:
    """Return the Catalan number: the count of binary search trees on `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the count of binary trees with `node_count` labeled nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 609 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 589 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic (sigmoid) function elementwise."""
    return 1 / (1 + np.exp(-vector))
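# Hedged sanity check (added for illustration): the logistic function is
# exactly 0.5 at zero.
assert sigmoid(np.array([0.0]))[0] == 0.5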
if __name__ == "__main__":
import doctest
doctest.testmod()
| 589 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 438 |
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
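# Hedged worked example (added for illustration): the cheapest top-left to
# bottom-right path in the grid below costs 1 + 3 + 1 + 1 + 1 = 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7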
if __name__ == "__main__":
import doctest
doctest.testmod()
| 438 | 1 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that warns that the wrapped callable is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
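# Hedged usage sketch (added for illustration):
#     @experimental
#     def new_feature():
#         ...
#     new_feature()  # emits a UserWarning mentioning 'new_feature'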
| 17 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    return compute_nums(1)[0]
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 360 | 0 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 718 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    # Keep track of visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
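# Hedged usage sketch (added for illustration):
#     check_cycle({0: [1], 1: [2], 2: [0]})  # -> True (0 -> 1 -> 2 -> 0)
#     check_cycle({0: [1], 1: [2], 2: []})   # -> False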
if __name__ == "__main__":
from doctest import testmod
testmod()
| 140 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 94 |
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Brick (odd-even transposition) sort: sort `input_list` in place and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
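# Hedged sanity check (added for illustration):
assert odd_even_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]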
if __name__ == "__main__":
print("""Enter list to be sorted""")
_lowerCAmelCase : List[str] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_lowerCAmelCase : Optional[int] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 438 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the VAE encoding method, wrapping the latent distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disable custom attention processors and fall back to the default implementation."""
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
@apply_forward_hook
def _lowercase ( self: List[Any], _lowercase: torch.FloatTensor, _lowercase: bool = True):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
__lowerCAmelCase = [self._decode(_lowercase).sample for z_slice in z.split(1)]
__lowerCAmelCase = torch.cat(_lowercase)
else:
__lowerCAmelCase = self._decode(_lowercase).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_lowercase)
def _lowercase ( self: List[str], _lowercase: Dict, _lowercase: Tuple, _lowercase: Any):
'''simple docstring'''
__lowerCAmelCase = min(a.shape[2], b.shape[2], _lowercase)
for y in range(_lowercase):
__lowerCAmelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _lowercase ( self: int, _lowercase: Union[str, Any], _lowercase: Any, _lowercase: Dict):
'''simple docstring'''
__lowerCAmelCase = min(a.shape[3], b.shape[3], _lowercase)
for x in range(_lowercase):
__lowerCAmelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> "AutoencoderKLOutput":
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
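
# Added illustration (standalone; mirrors blend_h above): tile seams are removed
# by a linear cross-fade across the overlap columns. With a 4-pixel overlap
# between a zero tile and a one tile, the right tile's first columns become a ramp:
import torch as _torch_demo

_left, _right = _torch_demo.zeros(1, 1, 1, 4), _torch_demo.ones(1, 1, 1, 4)
for _x in range(4):
    _right[:, :, :, _x] = _left[:, :, :, -4 + _x] * (1 - _x / 4) + _right[:, :, :, _x] * (_x / 4)
assert _right.flatten().tolist() == [0.0, 0.25, 0.5, 0.75]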
| 334 |
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
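
    # Added note (hedged): in the distillation training script these counts become
    # smoothed masking probabilities, roughly p_i ∝ max(count_i, 1) ** -alpha
    # (alpha ~ 0.7 is an assumed default), so rarer tokens are masked more often:
    #
    #     probs = np.maximum(np.array(counts), 1) ** -0.7
    #     probs = probs / probs.sum()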
| 334 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__UpperCamelCase : Union[str, Any] = generate_large_matrix()
__UpperCamelCase : Tuple = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
assert all(row == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for row in grid )
assert all(list(_UpperCAmelCase ) == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for col in zip(*_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] ):
lowerCAmelCase = 0
lowerCAmelCase = len(_UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCAmelCase = (left + right) // 2
lowerCAmelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCAmelCase = mid + 1
else:
lowerCAmelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(grid[0] )
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(_UpperCAmelCase ) * len(grid[0] )) - total
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = 0
for row in grid:
for i, number in enumerate(_UpperCAmelCase ):
if number < 0:
total += len(_UpperCAmelCase ) - i
break
return total
def _SCREAMING_SNAKE_CASE ():
from timeit import timeit
print('Running benchmarks' )
lowerCAmelCase = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCAmelCase = timeit(F'{func}(grid=grid)' , setup=_UpperCAmelCase , number=500 )
print(F'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
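
# Added illustration: quick checks of the binary search helper. Each row costs
# O(log n), and since the per-row bound can only shrink, counting is O(m log n)
# overall versus O(m * n) brute force.
assert find_negative_index([4, 3, 2, -1]) == 3
assert find_negative_index([1, 0, 0]) == 3  # no negatives -> row length
assert find_negative_index([-1, -2, -3]) == 0  # all negative -> 0
assert count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8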
| 4 |
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
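
# Added worked example: for dims p = [30, 35, 15, 5, 10, 20, 25] the recurrence
#   m[a][b] = min_{a<=c<b} ( m[a][c] + m[c+1][b] + p[a-1] * p[c] * p[b] )
# gives m[1][6] == 15125 scalar multiplications, parenthesized ((A1(A2A3))((A4A5)A6)).
_m, _ = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert _m[1][6] == 15125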
| 298 | 0 |
"""simple docstring"""
a : Union[str, Any] = 8.3_14_45_98
def _UpperCamelCase ( _A , _A ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception("""Temperature cannot be less than 0 K""" )
if molar_mass <= 0:
raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
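
# Added sanity check: v_rms = sqrt(3 * R * T / M), with M in kg/mol. For N2 at
# 300 K, M = 0.028 kg/mol gives ~516.96 m/s. (The example below passes 28, kept
# from the original snippet, which yields ~16.35 m/s.)
assert abs(rms_speed_of_molecule(300, 0.028) - 516.96) < 0.5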
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
a : Union[str, Any] = 3_0_0
a : Dict = 2_8
a : Optional[Any] = rms_speed_of_molecule(temperature, molar_mass)
print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s") | 19 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 19 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
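
# Added illustration (not executed by the conversion): torch.split cuts the
# fused QKV projection into three equal blocks along dim 0, e.g. with depth 6:
#
#     fused = torch.arange(12.0).reshape(6, 2)
#     k, v, q = torch.split(fused, 6 // 3, dim=0)  # three (2, 2) blocks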
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 403 | import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict=[] ):
SCREAMING_SNAKE_CASE = size[0] - overlap_pixels * 2
SCREAMING_SNAKE_CASE = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
SCREAMING_SNAKE_CASE = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_5_5
SCREAMING_SNAKE_CASE = np.pad(UpperCAmelCase__ , mode="linear_ramp" , pad_width=UpperCAmelCase__ , end_values=0 )
if "l" in remove_borders:
SCREAMING_SNAKE_CASE = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
SCREAMING_SNAKE_CASE = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
SCREAMING_SNAKE_CASE = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
SCREAMING_SNAKE_CASE = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ):
return max(UpperCAmelCase__ , min(UpperCAmelCase__ , UpperCAmelCase__ ) )
def __lowerCamelCase (UpperCAmelCase__ : [int] , UpperCAmelCase__ : [int] , UpperCAmelCase__ : [int] ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __lowerCamelCase (UpperCAmelCase__ : [int] , UpperCAmelCase__ : int , UpperCAmelCase__ : [int] ):
SCREAMING_SNAKE_CASE = list(UpperCAmelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
SCREAMING_SNAKE_CASE = clamp_rect(UpperCAmelCase__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(UpperCAmelCase__ , (original_slice, 0) )
return result
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ):
SCREAMING_SNAKE_CASE = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
SCREAMING_SNAKE_CASE = tile.crop(UpperCAmelCase__ )
return tile
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = n % d
return n - divisor
class lowercase ( a ):
def __init__( self : Tuple , _UpperCamelCase : AutoencoderKL , _UpperCamelCase : CLIPTextModel , _UpperCamelCase : CLIPTokenizer , _UpperCamelCase : UNetaDConditionModel , _UpperCamelCase : DDPMScheduler , _UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCamelCase : int = 350 , ) -> Dict:
'''simple docstring'''
super().__init__(
vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , unet=_UpperCamelCase , low_res_scheduler=_UpperCamelCase , scheduler=_UpperCamelCase , max_noise_level=_UpperCamelCase , )
def __snake_case( self : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , **_UpperCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
SCREAMING_SNAKE_CASE = add_overlap_rect(_UpperCamelCase , _UpperCamelCase , image.size )
SCREAMING_SNAKE_CASE = image.crop(_UpperCamelCase )
SCREAMING_SNAKE_CASE = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
SCREAMING_SNAKE_CASE = translated_slice_x - (original_image_slice / 2)
SCREAMING_SNAKE_CASE = max(0 , _UpperCamelCase )
SCREAMING_SNAKE_CASE = squeeze_tile(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = to_input.size
SCREAMING_SNAKE_CASE = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
SCREAMING_SNAKE_CASE = super(_UpperCamelCase , self ).__call__(image=_UpperCamelCase , **_UpperCamelCase ).images[0]
SCREAMING_SNAKE_CASE = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE = unsqueeze_tile(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
SCREAMING_SNAKE_CASE = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
SCREAMING_SNAKE_CASE = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=_UpperCamelCase ) , mode="L" , )
final_image.paste(
_UpperCamelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , _UpperCamelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , _UpperCamelCase : Union[str, List[str]] , _UpperCamelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _UpperCamelCase : int = 75 , _UpperCamelCase : float = 9.0 , _UpperCamelCase : int = 50 , _UpperCamelCase : Optional[Union[str, List[str]]] = None , _UpperCamelCase : Optional[int] = 1 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : Optional[torch.Generator] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCamelCase : int = 1 , _UpperCamelCase : int = 128 , _UpperCamelCase : int = 32 , _UpperCamelCase : int = 32 , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
SCREAMING_SNAKE_CASE = math.ceil(image.size[0] / tile_size )
SCREAMING_SNAKE_CASE = math.ceil(image.size[1] / tile_size )
SCREAMING_SNAKE_CASE = tcx * tcy
SCREAMING_SNAKE_CASE = 0
for y in range(_UpperCamelCase ):
for x in range(_UpperCamelCase ):
self._process_tile(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , prompt=_UpperCamelCase , num_inference_steps=_UpperCamelCase , guidance_scale=_UpperCamelCase , noise_level=_UpperCamelCase , negative_prompt=_UpperCamelCase , num_images_per_prompt=_UpperCamelCase , eta=_UpperCamelCase , generator=_UpperCamelCase , latents=_UpperCamelCase , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def __lowerCamelCase ():
# Run a demo
SCREAMING_SNAKE_CASE = "stabilityai/stable-diffusion-x4-upscaler"
SCREAMING_SNAKE_CASE = StableDiffusionTiledUpscalePipeline.from_pretrained(UpperCAmelCase__ , revision="fp16" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE = pipe.to("cuda" )
SCREAMING_SNAKE_CASE = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(UpperCAmelCase__ : List[Any] ):
print(F"progress: {obj['progress']:.4f}" )
obj["image"].save("diffusers_library_progress.jpg" )
SCREAMING_SNAKE_CASE = pipe(image=UpperCAmelCase__ , prompt="Black font, white background, vector" , noise_level=4_0 , callback=UpperCAmelCase__ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
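
# Added illustration: make_transparency_mask above fades tile edges in over
# `overlap_pixels` using numpy's "linear_ramp" padding, e.g.
#
#     core = np.ones((2, 2), dtype=np.uint8) * 255
#     np.pad(core, mode="linear_ramp", pad_width=1, end_values=0)
#     # -> a zero border ramping up to the 255 core over 1 pixel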
| 403 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Optional[Union[str, Path]] = None
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : Optional[Dict] = None
lowerCamelCase : Optional[str] = None
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : int = 1
lowerCamelCase : Optional[Union[str, bool]] = None
lowerCamelCase : bool = False
lowerCamelCase : Optional[Dict] = None
lowerCamelCase : Optional[str] = None
    def copy(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) | 382 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
a = """base_with_context"""
def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : List[str] ):
"""simple docstring"""
_lowerCAmelCase :Dict = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__magic_name__ )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase :Optional[int] = weights[f"""layers_{lyr_num}"""]
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :Any = ly_weight['attention']
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowerCAmelCase :Optional[int] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :int = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__magic_name__ )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase :Any = weights[f"""layers_{lyr_num}"""]
_lowerCAmelCase :str = ly_weight['attention']
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowerCAmelCase :int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ):
"""simple docstring"""
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
_lowerCAmelCase :Dict = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__magic_name__ )
_lowerCAmelCase :List[Any] = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_lowerCAmelCase :int = weights[f"""layers_{lyr_num}"""]
_lowerCAmelCase :Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
_lowerCAmelCase :Tuple = ly_weight['self_attention']
_lowerCAmelCase :Dict = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :List[Any] = ly_weight['MultiHeadDotProductAttention_0']
_lowerCAmelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_lowerCAmelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_lowerCAmelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_lowerCAmelCase :Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_lowerCAmelCase :int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_lowerCAmelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_lowerCAmelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
_lowerCAmelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
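
# Added note: every Flax kernel above is transposed (`.T`) before being wrapped
# in nn.Parameter because Flax stores Dense kernels as (in_features, out_features)
# while torch.nn.Linear.weight is (out_features, in_features). Sketch
# (`flax_kernel` is a hypothetical (16, 32) array):
#
#     linear = nn.Linear(16, 32, bias=False)                    # weight: (32, 16)
#     linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))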
def UpperCamelCase_( __magic_name__ : Optional[Any] ):
"""simple docstring"""
_lowerCAmelCase :Dict = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_lowerCAmelCase :Tuple = jnp.tree_util.tree_map(onp.array , __magic_name__ )
_lowerCAmelCase :List[str] = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
_lowerCAmelCase :Any = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
_lowerCAmelCase :Tuple = inference.parse_training_gin_file(__magic_name__ , __magic_name__ )
_lowerCAmelCase :List[Any] = inference.InferenceModel(args.checkpoint_path , __magic_name__ )
_lowerCAmelCase :Dict = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
_lowerCAmelCase :Dict = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
_lowerCAmelCase :Any = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
_lowerCAmelCase :Any = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
_lowerCAmelCase :str = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __magic_name__ )
_lowerCAmelCase :Optional[int] = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __magic_name__ )
_lowerCAmelCase :List[str] = load_decoder(ta_checkpoint['target']['decoder'] , __magic_name__ )
_lowerCAmelCase :Tuple = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
_lowerCAmelCase :Union[str, Any] = SpectrogramDiffusionPipeline(
notes_encoder=__magic_name__ , continuous_encoder=__magic_name__ , decoder=__magic_name__ , scheduler=__magic_name__ , melgan=__magic_name__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
a = parser.parse_args()
main(args) | 382 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    r"""Variational Autoencoder (VAE) model with KL loss."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn)

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
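
# Added usage sketch (model id is an assumption; any diffusers VAE checkpoint works):
#
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     vae.enable_tiling()                 # encode/decode in overlapping tiles
#     image = vae.decode(latents).sample  # latents: (N, 4, H // 8, W // 8)
#
# Tiling trades a little seam blending (see blend_v/blend_h) for much lower
# peak memory on large images.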
| 370 | '''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['''input_values''', '''attention_mask''']
def __init__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = 16000 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = False , __lowerCAmelCase = 80 , __lowerCAmelCase = 16 , __lowerCAmelCase = 64 , __lowerCAmelCase = "hann_window" , __lowerCAmelCase = 1.0 , __lowerCAmelCase = 80 , __lowerCAmelCase = 7600 , __lowerCAmelCase = 1E-1_0 , __lowerCAmelCase = 2 , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = do_normalize
lowerCAmelCase = return_attention_mask
lowerCAmelCase = num_mel_bins
lowerCAmelCase = hop_length
lowerCAmelCase = win_length
lowerCAmelCase = win_function
lowerCAmelCase = frame_signal_scale
lowerCAmelCase = fmin
lowerCAmelCase = fmax
lowerCAmelCase = mel_floor
lowerCAmelCase = reduction_factor
lowerCAmelCase = win_length * sampling_rate // 1000
lowerCAmelCase = hop_length * sampling_rate // 1000
lowerCAmelCase = optimal_fft_length(self.sample_size)
lowerCAmelCase = (self.n_fft // 2) + 1
lowerCAmelCase = window_function(window_length=self.sample_size , name=self.win_function , periodic=__lowerCAmelCase)
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
if frame_signal_scale != 1.0:
warnings.warn(
"""The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" , __lowerCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
"""The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" , __lowerCAmelCase , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform):
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                is_target=False,
                padding=padding,
                max_length=max_length,
                truncation=truncation,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                is_target=True,
                padding=padding,
                max_length=max_length,
                truncation=truncation,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_tensors=return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(waveform, dtype=np.float32) for waveform in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
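# Hedged usage sketch (added for illustration; the one-second sine input is made
# up and the printed shapes assume the default hyper-parameters above):
if __name__ == "__main__":
    example_wave = np.sin(np.linspace(0, 100, 16_000)).astype(np.float32)  # ~1 s of mono 16 kHz audio
    extractor = SpeechT5FeatureExtractor()
    encoded = extractor(audio=example_wave, sampling_rate=16_000, return_tensors="np")
    print(encoded["input_values"].shape)  # (1, 16000): the raw waveform batch
    targets = extractor(audio_target=example_wave, sampling_rate=16_000, return_tensors="np")
    print(targets["input_values"].shape)  # (1, num_frames, 80): log-mel spectrogram labels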
| 370 | 1 |
'''simple docstring'''


def all_unique_chars(input_str: str) -> bool:
    """
    Return True if every character in `input_str` is distinct, using a bitmap
    keyed on Unicode code points.

    >>> all_unique_chars("abcde")
    True
    >>> all_unique_chars("hello")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
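    # Hedged aside (added for illustration): the bitmap test agrees with the
    # simpler set-based check while avoiding the set allocation.
    assert all_unique_chars("abcdef") == (len(set("abcdef")) == len("abcdef"))
    assert all_unique_chars("aabb") == (len(set("aabb")) == len("aabb"))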
| 312 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
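# Hedged usage sketch (added for illustration): the defaults above are intended
# to match the google/fnet-base architecture hyper-parameters.
if __name__ == "__main__":
    config = FNetConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # fnet 768 12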
| 312 | 1 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
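# Hedged demo (added for illustration): the bar can also be driven by hand
# outside of a Trainer; inside Jupyter this renders an HTML <progress> element.
if __name__ == "__main__":
    demo_bar = NotebookProgressBar(total=10, prefix="demo")
    for demo_step in range(1, 11):
        time.sleep(0.05)
        demo_bar.update(demo_step, comment=f"step {demo_step}")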
| 497 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""

    # Hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
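# Hedged usage sketch (added for illustration; the script filename and checkpoint
# path below are placeholders, not real files):
#
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch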
| 497 | 1 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" (here: next) timestep of the inverted process
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
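# Hedged usage sketch (added for illustration): drive the inverse scheduler with
# a dummy all-zeros noise prediction standing in for a UNet's epsilon output.
if __name__ == "__main__":
    demo_scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 4, 8, 8)
    for t in demo_scheduler.timesteps:
        demo_noise_pred = torch.zeros_like(demo_sample)
        demo_sample = demo_scheduler.step(demo_noise_pred, int(t), demo_sample).prev_sample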
| 207 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
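# Hedged note (added for illustration): these tests are meant to be launched
# from the root of the accelerate repository, e.g. `python -m pytest -q <this file>`,
# so that the relative `examples/...` paths used above resolve correctly.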
| 207 | 1 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
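# Hedged usage sketch (added for illustration):
#
#   python release.py                 # bump to the next minor version pre-release
#   python release.py --patch         # bump the micro version for a patch release
#   python release.py --post_release  # move back to a .dev0 development version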
| 144 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Spherical linear interpolation between `v0` and `v1` (numpy or torch inputs).
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Vectors are nearly colinear: fall back to plain linear interpolation.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
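# Hedged aside (added for illustration): slerp interpolates along the great
# circle between two vectors, so e.g. `slerp(0.5, v0, v1)` returns the angular
# midpoint of v0 and v1 rather than their straight-line average.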
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # add noise to the initial latents at the given timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self : Tuple , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
UpperCAmelCase_ = [generator] + [None] * (batch_size - 1)
UpperCAmelCase_ = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCAmelCase_ = [x[0] for x in coca_is_none if x[1]]
UpperCAmelCase_ = ''', '''.join(__snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__snake_case ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                    F' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
UpperCAmelCase_ = self.get_image_description(__snake_case )
if style_prompt is None:
if len(__snake_case ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                    F' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.' )
UpperCAmelCase_ = self.get_image_description(__snake_case )
# get prompt text embeddings for content and style
UpperCAmelCase_ = self.tokenizer(
__snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , )
UpperCAmelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase_ = self.tokenizer(
__snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='''pt''' , )
UpperCAmelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase_ = slerp(__snake_case , __snake_case , __snake_case )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase_ = text_embeddings.repeat_interleave(__snake_case , dim=0 )
# set timesteps
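        # some scheduler versions accept an `offset` kwarg in `set_timesteps`; probe the
        # signature instead of hard-coding scheduler classes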
UpperCAmelCase_ = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCAmelCase_ = {}
if accepts_offset:
UpperCAmelCase_ = 1
self.scheduler.set_timesteps(__snake_case , **__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCAmelCase_ , UpperCAmelCase_ = self.get_timesteps(__snake_case , __snake_case , self.device )
UpperCAmelCase_ = timesteps[:1].repeat(__snake_case )
# Preprocess image
UpperCAmelCase_ = preprocess(__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
UpperCAmelCase_ = preprocess(__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
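        # spherically interpolate the two image latents to blend content and style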
UpperCAmelCase_ = slerp(__snake_case , __snake_case , __snake_case )
if clip_guidance_scale > 0:
UpperCAmelCase_ = self.get_clip_image_embeddings(__snake_case , __snake_case )
UpperCAmelCase_ = self.get_clip_image_embeddings(__snake_case , __snake_case )
UpperCAmelCase_ = slerp(
__snake_case , __snake_case , __snake_case )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ = content_text_input.input_ids.shape[-1]
UpperCAmelCase_ = self.tokenizer([''''''] , padding='''max_length''' , max_length=__snake_case , return_tensors='''pt''' )
UpperCAmelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCAmelCase_ = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCAmelCase_ = torch.randn(__snake_case , generator=__snake_case , device='''cpu''' , dtype=__snake_case ).to(
self.device )
else:
UpperCAmelCase_ = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
UpperCAmelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler and will be ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
# check if the scheduler accepts generator
UpperCAmelCase_ = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCAmelCase_ = generator
with self.progress_bar(total=__snake_case ):
for i, t in enumerate(__snake_case ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
UpperCAmelCase_ = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.chunk(2 )
UpperCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCAmelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCAmelCase_ , UpperCAmelCase_ = self.cond_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase_ = 1 / 0.18_215 * latents
UpperCAmelCase_ = self.vae.decode(__snake_case ).sample
UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
| 144 | 1 |
def _UpperCamelCase ( UpperCamelCase_ : str ) -> list:
"""simple docstring"""
    return [
        UpperCamelCase_[:a] + UpperCamelCase_[a].upper() + UpperCamelCase_[a + 1 :]
        for a in range(len(UpperCamelCase_ ) )
        if UpperCamelCase_[a].isalpha()
    ]
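# e.g. _UpperCamelCase("ab") returns ["Ab", "aB"]: one variant per alphabetic position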
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 717 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCamelCase__ ( *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
pass
def _UpperCamelCase ( UpperCamelCase_ : Tuple ) -> Any:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__snake_case : List[str] = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
_SCREAMING_SNAKE_CASE : Dict = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model=_UpperCamelCase , tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
lowerCAmelCase__ = INVOICE_URL
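        # run OCR once up front so the same (word, box) pairs can be reused across examples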
lowerCAmelCase__ = list(zip(*apply_tesseract(load_image(_UpperCamelCase ) , _UpperCamelCase , '' ) ) )
lowerCAmelCase__ = 'What is the placebo?'
lowerCAmelCase__ = [
{
'image': load_image(_UpperCamelCase ),
'question': question,
},
{
'image': image,
'question': question,
},
{
'image': image,
'question': question,
'word_boxes': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = dqa_pipeline(_UpperCamelCase , top_k=2 )
self.assertEqual(
_UpperCamelCase , [
[
{'score': ANY(_UpperCamelCase ), 'answer': ANY(_UpperCamelCase ), 'start': ANY(_UpperCamelCase ), 'end': ANY(_UpperCamelCase )},
{'score': ANY(_UpperCamelCase ), 'answer': ANY(_UpperCamelCase ), 'start': ANY(_UpperCamelCase ), 'end': ANY(_UpperCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'How many cats are there?'
lowerCAmelCase__ = [
{'score': 0.00_01, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
{'score': 0.00_01, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
]
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_UpperCamelCase , decimals=4 ) , _UpperCamelCase )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(_UpperCamelCase , decimals=4 ) , _UpperCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail and
        # return an empty answer.
lowerCAmelCase__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(_UpperCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCAmelCase__ = './tests/fixtures/tests_samples/COCO/000000039769.png'
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , words=_UpperCamelCase , boxes=_UpperCamelCase , top_k=2 )
self.assertEqual(_UpperCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_UpperCamelCase )
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_UpperCamelCase , revision='3dc6de3' , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCAmelCase__ = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
lowerCAmelCase__ = list(zip(*apply_tesseract(load_image(_UpperCamelCase ) , _UpperCamelCase , '' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=_UpperCamelCase )
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=_UpperCamelCase , revision='3dc6de3' , max_seq_len=50 , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowerCAmelCase__ = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
lowerCAmelCase__ = list(zip(*apply_tesseract(load_image(_UpperCamelCase ) , _UpperCamelCase , '' ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowerCAmelCase__ = INVOICE_URL
lowerCAmelCase__ = 'What is the invoice number?'
lowerCAmelCase__ = dqa_pipeline(image=_UpperCamelCase , question=_UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_UpperCamelCase , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
| 365 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class a ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = inspect.getfile(accelerate.test_utils )
lowerCAmelCase : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
lowerCAmelCase : int = ['accelerate', 'launch']
lowerCAmelCase : str = Path.home() / '.cache/huggingface/accelerate'
lowerCAmelCase : List[str] = 'default_config.yaml'
lowerCAmelCase : Union[str, Any] = config_folder / config_file
lowerCAmelCase : Dict = config_folder / '_default_config.yaml'
lowerCAmelCase : Any = Path('tests/test_configs' )
@classmethod
def lowerCamelCase_ ( cls : int ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase_ ( cls : Tuple ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.base_cmd
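        # request a multi-GPU launch only when more than one CUDA device is available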
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase_ ( self : Optional[int] ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__snake_case ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__snake_case ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase_ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class a ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : str = 'test-tpu'
lowerCAmelCase : Optional[Any] = 'us-central1-a'
lowerCAmelCase : int = 'ls'
lowerCAmelCase : Any = ['accelerate', 'tpu-config']
lowerCAmelCase : Any = 'cd /usr/share'
lowerCAmelCase : Tuple = 'tests/test_samples/test_command_file.sh'
lowerCAmelCase : List[str] = 'Running gcloud compute tpus tpu-vm ssh'
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , __snake_case , )
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , __snake_case , )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__snake_case )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , __snake_case , )
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , __snake_case , )
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , __snake_case , )
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , __snake_case , )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , __snake_case , )
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , __snake_case , )
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , __snake_case , )
| 144 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def lowerCamelCase_ ( self : List[str] ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def lowerCamelCase_ ( self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
UpperCAmelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
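        # Mel converts between raw audio and mel-spectrogram images sized to match the UNet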
UpperCAmelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = AudioDiffusionPipeline(vqvae=__snake_case , unet=self.dummy_unet , mel=__snake_case , scheduler=__snake_case )
UpperCAmelCase_ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=__snake_case , steps=4 )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = output.images[0]
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=__snake_case , steps=4 , return_dict=__snake_case )
UpperCAmelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCAmelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
UpperCAmelCase_ = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCAmelCase_ = DDIMScheduler()
UpperCAmelCase_ = self.dummy_vqvae_and_unet
UpperCAmelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__snake_case , scheduler=__snake_case )
UpperCAmelCase_ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
np.random.seed(0 )
UpperCAmelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(42 )
UpperCAmelCase_ = pipe(raw_audio=__snake_case , generator=__snake_case , start_step=5 , steps=10 )
UpperCAmelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCAmelCase_ = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ = self.dummy_unet_condition
UpperCAmelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__snake_case , mel=__snake_case , scheduler=__snake_case )
UpperCAmelCase_ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
np.random.seed(0 )
UpperCAmelCase_ = torch.rand((1, 1, 10) )
UpperCAmelCase_ = pipe(generator=__snake_case , encoding=__snake_case )
UpperCAmelCase_ = output.images[0]
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCAmelCase_ = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = torch_device
UpperCAmelCase_ = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
UpperCAmelCase_ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=__snake_case )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCAmelCase_ = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 144 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , __lowerCAmelCase=0 , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = projection_dim
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
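        # build a tiny BERT backbone config and wrap it in a DPRConfig to keep the test models small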
lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
lowerCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFDPRContextEncoder(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFDPRQuestionEncoder(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = TFDPRReader(config=__lowerCAmelCase)
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
        (
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
        ) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : Dict = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Union[str, Any] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFDPRModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFDPRContextEncoder.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFDPRQuestionEncoder.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFDPRReader.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@require_tf
class a__( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""")
lowerCAmelCase = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]) # [CLS] hello, is my dog cute? [SEP]
lowerCAmelCase = model(__lowerCAmelCase)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4))
| 605 | '''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def snake_case__ ( _A: float ) -> float:
    '''simple docstring'''
    if _A <= 0:
        raise ValueError("""math domain error""" )
    return quad(_snake_case_integrand , 0 , inf , args=(_A) )[0]
def _snake_case_integrand ( _A: float , _B: float ) -> float:
    '''simple docstring'''
    return math.pow(_A , _B - 1 ) * math.exp(-_A )
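# sanity check: snake_case__(5) integrates x**4 * exp(-x) over [0, inf), i.e. Γ(5) = 4! = 24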
if __name__ == "__main__":
from doctest import testmod
testmod()
| 605 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : int ) -> int:
super().setUp()
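        # write a toy vocab and sentencepiece files so a minimal Marian tokenizer can be built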
__snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
__snake_case = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
__snake_case = '</s>'
__snake_case = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> List[str]:
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
__snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
__snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
__snake_case = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )]
self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Any:
__snake_case = self.get_tokenizer()
__snake_case = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
__snake_case = self.get_tokenizer()
__snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
__snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
__snake_case = 'Tämä on testi'
__snake_case = 'This is a test'
__snake_case = [76, 7, 2047, 2]
__snake_case = [69, 12, 11, 940, 2]
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase_ ( self ):
__A , __A : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=_A , dtype=jnp.bfloataa )
__A , __A : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_A , from_pt=_A , dtype=jnp.bfloataa )
__A : Optional[Any] = controlnet_params
__A : Optional[int] = 'bird'
__A : List[str] = jax.device_count()
__A : Any = pipe.prepare_text_inputs([prompts] * num_samples )
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
__A : List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
__A : List[str] = jax.random.PRNGKey(0 )
__A : List[str] = jax.random.split(_A , jax.device_count() )
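        # replicate the params and shard the inputs so the jitted pipeline runs one slice per device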
__A : int = replicate(_A )
__A : Optional[Any] = shard(_A )
__A : List[str] = shard(_A )
__A : str = pipe(
prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__A : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A : Dict = images[0, 253:256, 253:256, -1]
__A : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A : List[str] = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
__A , __A : List[Any] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=_A , dtype=jnp.bfloataa )
__A , __A : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=_A , from_pt=_A , dtype=jnp.bfloataa )
__A : Optional[int] = controlnet_params
__A : Tuple = 'Chef in the kitchen'
__A : Optional[int] = jax.device_count()
__A : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
__A : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
__A : Dict = pipe.prepare_image_inputs([pose_image] * num_samples )
__A : int = jax.random.PRNGKey(0 )
__A : Dict = jax.random.split(_A , jax.device_count() )
__A : str = replicate(_A )
__A : Union[str, Any] = shard(_A )
__A : List[Any] = shard(_A )
__A : Any = pipe(
prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__A : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__A : List[Any] = images[0, 253:256, 253:256, -1]
__A : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A : Optional[Any] = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 239 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class lowerCAmelCase__ :
def __init__( self : str ) -> None:
"""simple docstring"""
lowerCamelCase_ : list[Any] = []
lowerCamelCase_ : int = 0
lowerCamelCase_ : int = 0
def __UpperCamelCase ( self : str ) -> bool:
"""simple docstring"""
return self.head == self.tail
def __UpperCamelCase ( self : Any , UpperCamelCase_ : Any ) -> None:
"""simple docstring"""
self.data.append(UpperCamelCase_ )
lowerCamelCase_ : str = self.tail + 1
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.data[self.head]
lowerCamelCase_ : Optional[Any] = self.head + 1
return ret
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return self.tail - self.head
def __UpperCamelCase ( self : Optional[int] ) -> None:
"""simple docstring"""
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class lowerCAmelCase__ :
def __init__( self : Dict , UpperCamelCase_ : Any ) -> None:
"""simple docstring"""
lowerCamelCase_ : Any = data
lowerCamelCase_ : MyNode | None = None
lowerCamelCase_ : MyNode | None = None
lowerCamelCase_ : int = 1
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.data
def __UpperCamelCase ( self : Tuple ) -> MyNode | None:
"""simple docstring"""
return self.left
def __UpperCamelCase ( self : Any ) -> MyNode | None:
"""simple docstring"""
return self.right
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
return self.height
def __UpperCamelCase ( self : Dict , UpperCamelCase_ : Any ) -> None:
"""simple docstring"""
lowerCamelCase_ : List[str] = data
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : MyNode | None ) -> None:
"""simple docstring"""
lowerCamelCase_ : List[str] = node
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : MyNode | None ) -> None:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = node
def __UpperCamelCase ( self : str , UpperCamelCase_ : int ) -> None:
"""simple docstring"""
lowerCamelCase_ : List[str] = height
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
if node is None:
return 0
return node.get_height()
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if a > b:
return a
return b
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
print('''left rotation node:''' , node.get_data() )
lowerCamelCase_ : Optional[int] = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__UpperCAmelCase )
lowerCamelCase_ : Tuple = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCAmelCase )
lowerCamelCase_ : List[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCAmelCase )
return ret
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
print('''right rotation node:''' , node.get_data() )
lowerCamelCase_ : Dict = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__UpperCAmelCase )
lowerCamelCase_ : Dict = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCAmelCase )
return ret
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : int = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__UpperCAmelCase ) )
return right_rotation(__UpperCAmelCase )
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Any = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__UpperCAmelCase ) )
return left_rotation(__UpperCAmelCase )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if node is None:
return MyNode(__UpperCAmelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __UpperCAmelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
lowerCamelCase_ : Dict = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowerCamelCase_ : int = right_rotation(__UpperCAmelCase )
else:
lowerCamelCase_ : List[str] = lr_rotation(__UpperCAmelCase )
else:
node.set_right(insert_node(node.get_right() , __UpperCAmelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
lowerCamelCase_ : str = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowerCamelCase_ : int = rl_rotation(__UpperCAmelCase )
else:
lowerCamelCase_ : Tuple = left_rotation(__UpperCAmelCase )
lowerCamelCase_ : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCAmelCase )
return node
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
while True:
lowerCamelCase_ : List[str] = root.get_right()
if right_child is None:
break
lowerCamelCase_ : Optional[Any] = right_child
return root.get_data()
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
while True:
lowerCamelCase_ : Tuple = root.get_left()
if left_child is None:
break
lowerCamelCase_ : List[str] = left_child
return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    """Delete ``data`` from the subtree rooted at ``root`` and rebalance."""
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # Two children: replace the data with the in-order successor,
            # then delete the successor from the right subtree.
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance if the deletion left the subtree lopsided.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class AVLtree:
    """A self-balancing binary search tree."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                # push placeholders so every level keeps its full width
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
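# A minimal sketch (not part of the original module) of how the AVL balance
# invariant could be verified after a sequence of inserts/deletes;
# `check_balanced` is a hypothetical helper added for illustration only.
def check_balanced(node: MyNode | None) -> bool:
    """Return True if every subtree's height difference is at most 1."""
    if node is None:
        return True
    return (
        abs(get_height(node.get_left()) - get_height(node.get_right())) <= 1
        and check_balanced(node.get_left())
        and check_balanced(node.get_right())
    )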
| 702 |
"""Tests for the agent output types (AgentAudio, AgentImage, AgentText)."""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    """Return a unique file path inside a fresh temporary directory."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))
    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 418 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
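# Illustrative call (requires the models and indexes loaded above; the fields
# of each returned example follow the ELI5 dataset schema):
#
#   nearest = find_nearest_training("How do planes fly?", n_results=3)
#   print(nearest[0]["title"])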
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is read from the module-level variable set by make_support
    # in the main script body below.
    return (answer, support_list)
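# Note on the `st.cache` decorator above: `hash_funcs` maps argument types that
# streamlit cannot hash cheaply (torch tensors inside the model, the BART
# tokenizer) to a constant, so the cached call is keyed only on the remaining
# arguments. Illustrative call (results depend on the models loaded above):
#
#   answer, _ = answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256)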
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 33 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
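    # Illustrative sketch of the `trust_remote_code` contract exercised above
    # (standard transformers API; the repo id is the fixture used in this file):
    #
    #   processor = AutoImageProcessor.from_pretrained(
    #       "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
    #   )
    #
    # With trust_remote_code=True the processor class is downloaded from the Hub
    # repo and executed locally instead of being resolved from transformers itself.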
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem): one aggressive stride-2 convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
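# Shape walk-through for the stem above, assuming the standard resnet-50
# config (num_channels=3, embedding_size=64): a (1, 3, 224, 224) input passes
# through the stride-2 7x7 convolution to (1, 64, 112, 112), then through the
# stride-2 3x3 max pool to (1, 64, 56, 56).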
class ResNetShortCut(nn.Module):
    """Projects residual features to the right size (and optionally downsamples with ``stride=2``)."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A ResNet bottleneck layer: a 1x1 convolution reduces the channels by ``reduction`` to keep the middle
    3x3 convolution cheap, and a final 1x1 convolution maps back to ``out_channels``.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
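# With out_channels=256 and the default reduction=4, the bottleneck above runs
# 1x1 (in -> 64), 3x3 (64 -> 64), 1x1 (64 -> 256): the classic ResNet-50
# bottleneck channel pattern, which keeps the expensive 3x3 convolution cheap.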
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
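# Illustrative usage (standard transformers API; `image` is any PIL image, and
# the output shape matches _EXPECTED_OUTPUT_SHAPE above):
#
#   from transformers import AutoImageProcessor, ResNetModel
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetModel.from_pretrained("microsoft/resnet-50")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   outputs.last_hidden_state.shape  # torch.Size([1, 2048, 7, 7])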
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type from the labels if it was not configured.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
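# Illustrative usage (standard transformers API; `image` is any PIL image):
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   model.config.id2label[logits.argmax(-1).item()]  # e.g. "tiger cat"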
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 714 |
"""CSV dataset builder, a thin wrapper around ``pandas.read_csv``."""

import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal

logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV. Field names mirror the corresponding ``pandas.read_csv`` keyword arguments."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str], Callable[[str], bool]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int], Callable[[int], bool]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
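# Illustrative usage of this builder through the public datasets API (the file
# path is hypothetical): every CsvConfig field above (sep, quotechar,
# chunksize, ...) can be forwarded as a keyword argument and ends up in
# pd_read_csv_kwargs.
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";")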
| 656 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: bubble the largest element to the end, then recurse
    on the remaining prefix.

    >>> bubble_sort([54, 26, 93, 17, 77, 31, 44, 55, 20])
    [17, 20, 26, 31, 44, 54, 55, 77, 93]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 43 |
"""Example of performing k-fold cross validation with 🤗 Accelerate, building on the base `nlp_example.py` script."""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build train/validation/test DataLoaders for one cross-validation fold of GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
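# A minimal, self-contained sketch (not part of the original script) of the
# StratifiedKFold behaviour relied on below, with synthetic labels:
#
#   import numpy as np
#   from sklearn.model_selection import StratifiedKFold
#   y = np.array([0, 0, 0, 1, 1, 1])
#   for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(y)), y):
#       ...  # each fold preserves the 0/1 class ratio of y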
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**UpperCAmelCase )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCAmelCase )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCAmelCase ,references=UpperCAmelCase ,)
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,UpperCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
A__ = []
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCAmelCase )
A__ = outputs.logits
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(UpperCAmelCase ,dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
A__ = torch.cat(UpperCAmelCase ,dim=0 )
A__ = torch.stack(UpperCAmelCase ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
A__ = metric.compute(predictions=UpperCAmelCase ,references=UpperCAmelCase )
accelerator.print('Average test metrics from all folds:' ,UpperCAmelCase )
def _A ( ):
'''simple docstring'''
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' ,type=UpperCAmelCase ,default=UpperCAmelCase ,choices=['no', 'fp16', 'bf16', 'fp8'] ,help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' ,)
parser.add_argument('--cpu' ,action='store_true' ,help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' ,type=UpperCAmelCase ,default=3 ,help='The number of splits to perform across the dataset' )
A__ = parser.parse_args()
A__ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCAmelCase ,UpperCAmelCase )
if __name__ == "__main__":
main()
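
# --- Illustration (not part of the original script) ------------------------
# A minimal, self-contained sketch of the soft-voting ensemble used above:
# the per-fold test logits are stacked, averaged over the number of folds,
# and the argmax over classes gives the final prediction. The helper name,
# shapes, and values are made up for demonstration only.
def _illustrate_fold_ensembling(num_folds=3, num_examples=8, num_classes=2):
    fold_logits = [torch.randn(num_examples, num_classes) for _ in range(num_folds)]
    avg_logits = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds)
    return avg_logits.argmax(dim=-1)  # one predicted label per test example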
| 531 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code example's whitespace-normalized content."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate the mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes, and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting occurrences of 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if a file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
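
# --- Illustration (not part of the original script) ------------------------
# Minimal sketch of the exact-deduplication idea used above: hash each
# document's whitespace-normalized content and keep only the first
# occurrence of every hash. Wrapped in a helper so it does not run here.
def _illustrate_exact_dedup(docs):
    seen, deduped = set(), []
    for doc in docs:
        doc_hash = hashlib.md5(re.sub(r"\s+", "", doc).encode("utf-8")).hexdigest()
        if doc_hash not in seen:
            seen.add(doc_hash)
            deduped.append(doc)
    return deduped  # whitespace-only variants of the same code collapse to one entry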
| 46 |
# Min heap data structure
# with decrease key functionality - in O(log(n)) time


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        # sift down from the last parent to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
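
# --- Illustration (not part of the original module) -------------------------
# Sketch of the remaining public operations, using the Node/MinHeap classes
# defined above: `insert` sifts a new node up, `remove` pops the current
# minimum and restores the heap property.
demo_heap = MinHeap([Node("P", 5), Node("Q", 2)])
demo_heap.insert(Node("S", 1))
print(demo_heap.peek())    # Node(S, 1) - the new minimum
print(demo_heap.remove())  # pops Node(S, 1); Node(Q, 2) becomes the minimum
print(demo_heap.peek())    # Node(Q, 2)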
| 46 | 1 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus
    via the Newton-Laplace equation, c = sqrt(K / rho).

    >>> speed_of_sound_in_a_fluid(density=4.0, bulk_modulus=1e4)
    50.0
    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 1)  # water
    1467.8
    >>> speed_of_sound_in_a_fluid(density=0, bulk_modulus=1e4)
    Traceback (most recent call last):
        ...
    ValueError: Impossible fluid density
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True

    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
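
# --- Illustration (not part of the original script) -------------------------
# Why padded label positions become -100 above: PyTorch losses treat
# ignore_index=-100 as "skip this position", so padding never contributes to
# the loss. Wrapped in a helper so it does not run on import.
def _illustrate_label_masking():
    labels = torch.tensor([[5, 6, 7], [5, 0, 0]])          # 0 = tokenizer pad id
    attention_mask = torch.tensor([[1, 1, 1], [1, 0, 0]])
    masked = labels.masked_fill(attention_mask.ne(1), -100)
    return masked  # tensor([[5, 6, 7], [5, -100, -100]])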
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results

if __name__ == "__main__":
main()
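
# --- Illustration (not part of the original script) -------------------------
# Why `training_step` divides the loss by gradient_accumulation_steps:
# accumulating N scaled backward passes reproduces the gradient of a single
# backward pass over the combined batch (for a mean-reduced loss). Wrapped
# in a helper so it does not run when the script executes.
def _illustrate_gradient_accumulation():
    w = torch.ones(1, requires_grad=True)
    data = torch.tensor([1.0, 2.0, 3.0, 4.0])
    (w * data).mean().backward()              # one big batch
    big_batch_grad = w.grad.clone()
    w.grad = None
    for micro in (data[:2], data[2:]):        # two accumulated micro-batches
        ((w * micro).mean() / 2).backward()
    return torch.allclose(big_batch_grad, w.grad)  # True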
| 53 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # NOTE: `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
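
# --- Illustration (not part of the original module) -------------------------
# Sketch of how a task template like this is typically used: it declares the
# schema a dataset must expose and how its columns map onto that schema.
# Wrapped in a helper so it does not run on import.
def _illustrate_column_mapping():
    template = LanguageModeling(text_column="content")
    assert template.task == "language-modeling"
    assert template.column_mapping == {"content": "text"}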
| 32 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
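
# --- Illustration (not part of the original module) -------------------------
# The block above is the standard lazy-import pattern: heavy submodules are
# only imported when one of their attributes is first accessed. A minimal,
# hypothetical standalone version of the same idea:
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")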
| 32 | 1 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Entry point: returns ``AutoConfig.from_pretrained(*args, **kwargs)``."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Entry point: returns ``AutoTokenizer.from_pretrained(*args, **kwargs)``."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Entry point: returns ``AutoModel.from_pretrained(*args, **kwargs)``."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Entry point: returns ``AutoModelForCausalLM.from_pretrained(*args, **kwargs)``."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Entry point: returns ``AutoModelForMaskedLM.from_pretrained(*args, **kwargs)``."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Entry point: returns ``AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)``."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Entry point: returns ``AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)``."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
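
# --- Illustration (not part of the original file) ----------------------------
# These wrappers are torch.hub entry points. A typical call, assuming this
# file is a repo's hubconf.py (repo and model names here are hypothetical):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")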
| 65 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[int] = '''speech_to_text'''
__lowercase : List[str] = ['''past_key_values''']
__lowercase : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase__=1_0_0_0_0 , lowerCAmelCase__=1_2 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=4 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=4 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=6_0_0_0 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=2 , lowerCAmelCase__=(5, 5) , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=8_0 , lowerCAmelCase__=1 , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True
__SCREAMING_SNAKE_CASE = max_source_positions
__SCREAMING_SNAKE_CASE = max_target_positions
__SCREAMING_SNAKE_CASE = num_conv_layers
__SCREAMING_SNAKE_CASE = list(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = conv_channels
__SCREAMING_SNAKE_CASE = input_feat_per_channel
__SCREAMING_SNAKE_CASE = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`.")
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
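
# --- Illustration (not part of the original module) --------------------------
# Sketch of typical usage: build a config and read attributes through the
# `attribute_map` aliases declared above. Wrapped in a helper so it does not
# run on import.
def _illustrate_config():
    config = Speech2TextConfig(encoder_layers=6, d_model=128)
    assert config.num_attention_heads == 4  # alias for encoder_attention_heads
    assert config.hidden_size == 128        # alias for d_model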
| 155 | 0 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
# fmt: off
_UpperCAmelCase : Optional[Any] ={'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,  # the large literal above keeps its original name
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilingualTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 331 |
"""OpenAI GPT model fine-tuning script.

Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
With default values this script fine-tunes and evaluates a pretrained OpenAI GPT on the RocStories dataset.
"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
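
# --- Illustration (not part of the original script) --------------------------
# Sketch of the double-heads input layout built above, on toy token ids:
# [start] story [delimiter] continuation [classify], one row per candidate
# ending, with mc_token_ids pointing at the final [classify] position.
# Wrapped in a helper so it does not run when the script executes.
def _illustrate_double_heads_layout():
    story, cont1, cont2 = [11, 12], [21], [22]
    start, delim, clf = 90, 91, 92
    rows = [
        [start] + story + [delim] + cont1 + [clf],  # [90, 11, 12, 91, 21, 92]
        [start] + story + [delim] + cont2 + [clf],  # [90, 11, 12, 91, 22, 92]
    ]
    mc_token_ids = [len(row) - 1 for row in rows]   # index of [clf] in each row
    return rows, mc_token_ids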
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == "__main__":
main()
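
# Example invocation (a sketch, not the script's documented CLI: the flag
# names come from the `args.*` accesses above; the script file name and
# dataset paths are placeholders):
#
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train \
#       --do_eval \
#       --train_dataset path/to/cloze_test_val.csv \
#       --eval_dataset path/to/cloze_test_test.csv \
#       --output_dir ./log \
#       --train_batch_size 16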

# ======================================================================

from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
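
# A minimal server sketch consistent with the calls the mocks above assert
# on (one bind/listen/accept, one recv, then chunked reads sent until read()
# returns a falsy value). Host and port are placeholder choices, not values
# taken from the real `file_transfer.send_file` module.
import socket


def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    port = 12312  # arbitrary non-privileged port
    sock_obj = socket.socket()
    host = socket.gethostname()

    sock_obj.bind((host, port))
    sock_obj.listen(5)

    conn, _ = sock_obj.accept()
    conn.recv(1024)  # initial request from the client

    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)

    conn.close()
    sock_obj.shutdown(socket.SHUT_RDWR)
    sock_obj.close()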

# ======================================================================

'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return one representative MLP linear layer (gpt2 vs. bloom naming)."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like low-rank adapter; used for testing only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
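
    # Usage sketch for the adapter above: wrap any nn.Linear, and the forward
    # pass adds a low-rank correction to the wrapped layer's output. Because
    # adapter[1] is zero-initialized, the wrapped module matches the base
    # module exactly at init.
    #
    #   base = nn.Linear(16, 32)
    #   wrapped = LoRALayer(base, rank=4)
    #   y = wrapped(torch.randn(2, 16))  # == base(x) before any training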
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = "bigscience/bloom-1b7"
# Constant values
a_ = 2.109659552692574
a_ = "Hello my name is"
a_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
a_ = 10
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = AutoTokenizer.from_pretrained(self.model_name )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
a_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
a_ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase_ , """quantization_config""" ) )
a_ : Any = config.to_dict()
a_ : Dict = config.to_diff_dict()
a_ : Dict = config.to_json_string()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
a_ : List[str] = self.model_fpaa.get_memory_footprint()
a_ : Optional[int] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a_ : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _lowerCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : str = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = BitsAndBytesConfig()
a_ : Union[str, Any] = True
a_ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : Tuple = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
def _lowerCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase_ ):
a_ : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase_ , load_in_abit=lowerCAmelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _lowerCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a_ : List[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" )
a_ : Union[str, Any] = self.model_fpaa.to(torch.floataa )
a_ : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a_ : int = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
a_ : Optional[Any] = self.model_fpaa.half()
# Check this does not throw an error
a_ : Dict = self.model_fpaa.float()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
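
# For reference, the (un-mangled) public API these tests exercise is the
# 4-bit loading path of transformers + bitsandbytes; a minimal sketch with
# an example checkpoint id:
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
#   )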
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _lowerCAmelCase ( cls ):
'''simple docstring'''
a_ : List[str] = """t5-small"""
a_ : Any = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
a_ : Optional[Any] = AutoTokenizer.from_pretrained(cls.model_name )
a_ : int = """Translate in German: Hello, my dog is cute"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
a_ : Dict = TaForConditionalGeneration._keep_in_fpaa_modules
a_ : str = None
# test with `t5-small`
a_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : Optional[int] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : str = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
a_ : Optional[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Any = model.generate(**lowerCAmelCase_ )
a_ : List[str] = modules
def _lowerCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a_ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a_ : Tuple = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Union[str, Any] = model.generate(**lowerCAmelCase_ )
# test with `flan-t5-small`
a_ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
a_ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
a_ : Tuple = model.generate(**lowerCAmelCase_ )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
a_ : Dict = """bigscience/bloom-560m"""
a_ : Any = """t5-small"""
# Different types of model
a_ : Tuple = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Sequence classification model
a_ : Any = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# CausalLM model
a_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
# Seq2seq model
a_ : str = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase_ , device_map="""auto""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a_ : List[Any] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a_ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
a_ : Dict = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase_ ) , self.EXPECTED_OUTPUTS )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = """facebook/opt-350m"""
super().setUp()
def _lowerCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
a_ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a_ : int = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a_ : Any = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase_ ) ):
a_ : List[str] = LoRALayer(module.q_proj , rank=16 )
a_ : Union[str, Any] = LoRALayer(module.k_proj , rank=16 )
a_ : Optional[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a_ : Tuple = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a_ : List[str] = model.forward(**lowerCAmelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = "gpt2-xl"
a_ = 3.3191854854152187

# ======================================================================

import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """A pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Run super-resolution on `image` and return the upscaled result."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
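
# Usage sketch (the checkpoint id is an example LDM super-resolution
# checkpoint; any repo exposing vqvae/unet/scheduler components works):
#
#   import PIL.Image
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")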

# ======================================================================

import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer built on the `tokenizers` library,
    mirroring sentencepiece's default normalization and pre-tokenization.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
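
# Usage sketch (file path and vocab size are placeholders; `encode` and
# `save` come from the tokenizers BaseTokenizer wrapper):
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(files=["corpus.txt"], vocab_size=8000)
#   ids = tokenizer.encode("Hello world").ids
#   tokenizer.save("unigram-tokenizer.json")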

# ======================================================================

"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Check whether `number` is prime, using the 6k +/- 1 optimization."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
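
# Worked example of the 6k +/- 1 loop above: for number = 29, sqrt(29) is
# about 5.39, so the loop only tests i = 5; 29 % 5 != 0 and 29 % 7 != 0,
# hence is_prime(29) returns True.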
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()

# ======================================================================

"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')

FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
UpperCAmelCase__ : Optional[int] = '<pad>'
UpperCAmelCase__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
    def test_get_vocab(self):
UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCamelCase_ ) , 1_004 )
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : str = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase__ : Union[str, Any] = 'I was born in 92000, and this is falsé.'
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(UpperCamelCase_ )
UpperCAmelCase__ : Dict = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        # <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
UpperCAmelCase__ : str = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
    def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_rust_tokenizer()
UpperCAmelCase__ : List[str] = 'I was born in 92000, and this is falsé.'
UpperCAmelCase__ : List[str] = tokenizer.tokenize(UpperCamelCase_ )
UpperCAmelCase__ : str = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : str = self.get_rust_tokenizer()
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase__ : Any = {'input_ids': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase__ : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=UpperCamelCase_ , )
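
# Outside the test harness the round-trip looks like this (checkpoint id as
# in the integration test above):
#
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tok.encode("J'aime le camembert !")
#   text = tok.decode(ids, skip_special_tokens=True)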

# ======================================================================

import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    """Configuration class for Mask2Former, backed by a (Swin) backbone config."""

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
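
# Usage sketch: the default constructor builds (and logs) a Swin backbone;
# any __init__ kwarg can be overridden, and to_dict() round-trips the nested
# backbone config.
#
#   config = Mask2FormerConfig()
#   small = Mask2FormerConfig(num_queries=50)
#   as_dict = config.to_dict()  # includes backbone_config as a plain dict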

# ======================================================================

#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved-print problem by locking this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise

# ======================================================================

import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
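
# Typical non-test usage of the decorator exercised above: start high and
# let CUDA OOM errors walk the batch size down (the body is illustrative).
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # retried with 64, 32, ... after each OOM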

# ======================================================================

def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
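
# Example: the 10th (0-indexed) permutation of range(4). Walking the
# factorial number system: 10 = 1*3! + 2*2! + 0*1!, which picks 1, then 3,
# then 0, leaving 2.
#
#   print(kth_permutation(10, 4))  # [1, 3, 0, 2]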

# ======================================================================

"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _snake_case ( a__ ):
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase , "width_multiplier" ) )
class _snake_case :
def __init__( self : Any , UpperCAmelCase : str , UpperCAmelCase : int=13 , UpperCAmelCase : Tuple=64 , UpperCAmelCase : Any=2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : str="swish" , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : Any=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : int=None , UpperCAmelCase : str=0.2_5 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : int=0.0 , ):
__lowerCamelCase : int = parent
__lowerCamelCase : str = batch_size
__lowerCamelCase : Union[str, Any] = image_size
__lowerCamelCase : Dict = patch_size
__lowerCamelCase : Union[str, Any] = num_channels
__lowerCamelCase : List[str] = make_divisible(512 * width_multiplier , divisor=8 )
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : List[str] = conv_kernel_size
__lowerCamelCase : int = output_stride
__lowerCamelCase : Union[str, Any] = classifier_dropout_prob
__lowerCamelCase : Tuple = use_labels
__lowerCamelCase : Tuple = is_training
__lowerCamelCase : Union[str, Any] = num_labels
__lowerCamelCase : Dict = initializer_range
__lowerCamelCase : Dict = scope
__lowerCamelCase : List[str] = width_multiplier
__lowerCamelCase : Optional[Any] = ffn_dropout
__lowerCamelCase : List[Any] = attn_dropout
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Any = None
__lowerCamelCase : Union[str, Any] = None
if self.use_labels:
__lowerCamelCase : int = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase__ ( self : Union[str, Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : str ):
__lowerCamelCase : int = MobileViTVaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : Tuple = model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ):
__lowerCamelCase : str = self.num_labels
__lowerCamelCase : Tuple = MobileViTVaForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : Optional[int] = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : str = self.num_labels
__lowerCamelCase : Any = MobileViTVaForSemanticSegmentation(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__lowerCamelCase : List[str] = model(UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowerCamelCase : List[Any] = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = config_and_inputs
__lowerCamelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case__ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : List[str] = MobileViTVaModelTester(self )
__lowerCamelCase : List[Any] = MobileViTVaConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowerCamelCase__ ( self : Any ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowerCamelCase__ ( self : Any ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowerCamelCase__ ( self : Optional[int] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowerCamelCase__ ( self : List[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase , __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = model_class(UpperCAmelCase )
__lowerCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : int = [*signature.parameters.keys()]
__lowerCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
def check_hidden_states_output(UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ):
__lowerCamelCase : str = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__lowerCamelCase : Tuple = outputs.hidden_states
__lowerCamelCase : Tuple = 5
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowerCamelCase : Union[str, Any] = 2
for i in range(len(UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Union[str, Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase_ ( ) -> Any:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Any ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
UpperCAmelCase )
__lowerCamelCase : List[str] = self.default_image_processor
__lowerCamelCase : Union[str, Any] = prepare_img()
__lowerCamelCase : str = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase : str = model(**UpperCAmelCase )
# verify the logits
__lowerCamelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
__lowerCamelCase : str = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__lowerCamelCase : Union[str, Any] = model.to(UpperCAmelCase )
__lowerCamelCase : str = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__lowerCamelCase : Dict = prepare_img()
__lowerCamelCase : int = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**UpperCAmelCase )
__lowerCamelCase : int = outputs.logits
# verify the logits
__lowerCamelCase : Any = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCAmelCase )
__lowerCamelCase : Tuple = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : int = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__lowerCamelCase : Any = model.to(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__lowerCamelCase : str = prepare_img()
__lowerCamelCase : Dict = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase : List[Any] = model(**UpperCAmelCase )
__lowerCamelCase : str = outputs.logits.detach().cpu()
__lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase , target_sizes=[(50, 60)] )
__lowerCamelCase : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase )
__lowerCamelCase : List[Any] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , UpperCAmelCase )

# ======================================================================

import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__A = 4
__A = 3
class _snake_case ( a__ ):
pass
def lowercase_ ( _lowerCamelCase: List[str] ) -> List[str]:
'''simple docstring'''
for shard in shards:
for i in range(_lowerCamelCase ):
yield {"i": i, "shard": shard}
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : List[Any] = int(os.environ["RANK"] )
__lowerCamelCase : Optional[int] = int(os.environ["WORLD_SIZE"] )
__lowerCamelCase : Any = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase )
parser.add_argument("--local_rank" , type=_lowerCamelCase )
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0 )
__lowerCamelCase : Dict = parser.parse_args()
__lowerCamelCase : str = args.streaming
__lowerCamelCase : List[Any] = args.num_workers
__lowerCamelCase : Optional[Any] = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(_lowerCamelCase )]}
__lowerCamelCase : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase )
if not streaming:
__lowerCamelCase : Optional[int] = Dataset.from_list(list(_lowerCamelCase ) )
__lowerCamelCase : Union[str, Any] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase )
__lowerCamelCase : Optional[Any] = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase )
__lowerCamelCase : List[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__lowerCamelCase : Optional[Any] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
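
# Minimal usage sketch for the context manager above (illustrative only, not from the
# original file): any HTTP call made inside the block fails fast instead of hanging.
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#         requests.get("https://huggingface.co", timeout=1.0)  # raises a prettified timeout error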
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
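
# Example (illustrative, not from the original file): under pytest-xdist, worker "gw3" makes
# pytest_xdist_worker_id() return 3, so get_torch_dist_unique_port() yields 29503 - each test
# worker gets its own torch.distributed rendezvous port and concurrent runs don't collide.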
'''simple docstring'''
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
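
# Quick sanity check of the buoyant force F = rho * g * V computed above (illustrative values,
# not part of the original file): fresh water (rho = 1000 kg/m^3) displaced by 0.5 m^3 gives
# 1000 * 9.80665 * 0.5 = 4903.325 N.
#
#     assert abs(archimedes_principle(1000, 0.5) - 4903.325) < 1e-6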
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
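
# Background on the last test (added explanation, not from the original file): `do_reduce_labels`
# shifts ADE20k labels down by one so the background class (0) wraps around to 255 and is ignored
# by the loss, which is why the max label jumps from 150 to 255 once the flag is enabled.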
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
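
# Note on the expected values above (added explanation, not from the original test): Funnel
# reserves a dedicated token type id of 2 for the <cls> token, while regular tokens of the first
# and second sequence get type ids 0 and 1 respectively - hence [2] + [0] * len + [1] * len.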
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
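
# Illustrative check of build_mask (not part of the original module): with pad_token_id = 0,
# build_mask(torch.tensor([5, 7, 0, 0]), 0) returns tensor([1, 1, 0, 0]) - real tokens are kept
# and padding positions are zeroed out of the attention mask.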
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
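
# Worked example for compute_token_type_ids (illustrative, not from the original file): with
# separator_token_id = 9, the sequence [9, 4, 5, 9, 6, 9, 7] increments the sentence counter at
# each separator and yields [0, 0, 0, 1, 1, 0, 0], the alternating interval-segment embedding.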
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there is not another equal value, because if there is it means
        # that there is a collision in the vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not exist in their
        # variables respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
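        #
        # Concrete illustration of the two diagonal formulas (added example, not in the
        # original): placing a queen at row=1, col=3 records row - col = -2 and
        # row + col = 4, so a later candidate at row=2, col=4 (2 - 4 = -2) or at
        # row=3, col=1 (3 + 1 = 4) is rejected as a diagonal collision.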
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
"""simple docstring"""
lowerCAmelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> List[str]:
_UpperCamelCase =pipeline(
'''document-question-answering''' , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
_UpperCamelCase =INVOICE_URL
_UpperCamelCase =list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , '''''' ) ) )
_UpperCamelCase ='''What is the placebo?'''
_UpperCamelCase =[
{
'''image''': load_image(UpperCamelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
_UpperCamelCase =dqa_pipeline(UpperCamelCase__ , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ ), '''start''': ANY(UpperCamelCase__ ), '''end''': ANY(UpperCamelCase__ )},
{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ ), '''start''': ANY(UpperCamelCase__ ), '''end''': ANY(UpperCamelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase__ ( self : Optional[Any] ) -> int:
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
_UpperCamelCase =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
_UpperCamelCase =INVOICE_URL
_UpperCamelCase ='''What is the invoice number?'''
_UpperCamelCase =dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_UpperCamelCase =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_UpperCamelCase =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
_UpperCamelCase =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
_UpperCamelCase =INVOICE_URL
_UpperCamelCase ='''What is the invoice number?'''
_UpperCamelCase =dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_UpperCamelCase =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_UpperCamelCase =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
_UpperCamelCase =INVOICE_URL
_UpperCamelCase ='''What is the invoice number?'''
_UpperCamelCase =dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
_UpperCamelCase =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
_UpperCamelCase =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
_UpperCamelCase =list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
_UpperCamelCase =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
_UpperCamelCase =INVOICE_URL
_UpperCamelCase ='''What is the invoice number?'''
_UpperCamelCase =dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_UpperCamelCase =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
_UpperCamelCase =list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
_UpperCamelCase =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
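        # Donut is a generative (OCR-free) model, so the pipeline returns only
        # an "answer" string here, with no score/start/end offsets.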
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
pass
| 271 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Any = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
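# For example, get_deta_config("deta-swin-large-o365") carries the 366
# Objects365 labels, while "deta-swin-large" falls back to the 91 COCO
# detection labels.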
def create_rename_keys(config):
    rename_keys = []

    # stem
    # fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
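# Each tuple above maps a key from the original DETA checkpoint to its
# Hugging Face counterpart; rename_key below applies one such mapping in place.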
def rename_key(dct, src, dest):
    val = dct.pop(src)
    dct[dest] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original DETA checkpoint weights to our DETA structure.
    """
    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # print the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
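# Example invocation (the script name below is an assumption):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub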
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 271 | 1 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def lowercase ( __snake_case : Optional[Any] ):
return input_array.reshape((input_array.size, 1) )
def lowercase ( __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple ):
lowercase_ : Dict = np.nan
for i in range(UpperCamelCase__ ):
lowercase_ : Union[str, Any] = features[:, labels == i]
lowercase_ : Optional[int] = data.mean(1 )
# Centralize the data of class i
lowercase_ : str = data - column_reshape(UpperCamelCase__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase_ : Any = np.dot(UpperCamelCase__ , centered_data.T )
return covariance_sum / features.shape[1]
def lowercase ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
lowercase_ : Union[str, Any] = features.mean(1 )
lowercase_ : Tuple = np.nan
for i in range(UpperCamelCase__ ):
lowercase_ : Any = features[:, labels == i]
lowercase_ : Optional[Any] = data.shape[1]
lowercase_ : str = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase__ ) - column_reshape(UpperCamelCase__ ) , (column_reshape(UpperCamelCase__ ) - column_reshape(UpperCamelCase__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowercase_ : int = device_data * np.dot(
column_reshape(UpperCamelCase__ ) - column_reshape(UpperCamelCase__ ) , (column_reshape(UpperCamelCase__ ) - column_reshape(UpperCamelCase__ )).T , )
return covariance_sum / features.shape[1]
def lowercase ( __snake_case : Dict , __snake_case : Optional[int] ):
# Check if the features have been loaded
if features.any():
lowercase_ : List[str] = features.mean(1 )
# Center the dataset
lowercase_ : Optional[Any] = features - np.reshape(UpperCamelCase__ , (data_mean.size, 1) )
lowercase_ : int = np.dot(UpperCamelCase__ , centered_data.T ) / features.shape[1]
lowercase_ : Optional[int] = np.linalg.eigh(UpperCamelCase__ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowercase_ : Tuple = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowercase_ : List[str] = np.dot(filtered_eigenvectors.T , UpperCamelCase__ )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCamelCase__ )
logging.error('''Dataset empty''' )
raise AssertionError
def lowercase ( __snake_case : str , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : str ):
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowercase_ : Union[str, Any] = eigh(
covariance_between_classes(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , covariance_within_classes(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , )
lowercase_ : Any = eigenvectors[:, ::-1][:, :dimensions]
lowercase_ : Any = np.linalg.svd(UpperCamelCase__ )
lowercase_ : Tuple = svd_matrix[:, 0:dimensions]
lowercase_ : Tuple = np.dot(filtered_svd_matrix.T , UpperCamelCase__ )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=UpperCamelCase__ )
logging.error('''Dataset empty''' )
raise AssertionError
def lowercase ( ):
# Create dummy dataset with 2 classes and 3 features
lowercase_ : str = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowercase_ : str = np.array([0, 0, 0, 1, 1] )
lowercase_ : List[str] = 2
lowercase_ : List[Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase__ ) as error_info:
lowercase_ : Tuple = linear_discriminant_analysis(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def lowercase ( ):
lowercase_ : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowercase_ : Optional[int] = 2
lowercase_ : Optional[int] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase__ ) as error_info:
lowercase_ : List[Any] = principal_component_analysis(UpperCamelCase__ , UpperCamelCase__ )
if not np.allclose(UpperCamelCase__ , UpperCamelCase__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
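# Minimal usage sketch (features are laid out as (n_features, n_samples)):
#
#     features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#     reduced = principal_component_analysis(features, dimensions=1)
#
# Both functions return the projected data with shape (dimensions, n_samples).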
| 231 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__A =True
except (ImportError, ModuleNotFoundError):
__A =False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
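# Usage sketch for the splitter defined below (one sentence per line, as
# expected when computing rougeLsum):
#
#     add_newline_to_end_of_each_sentence("First sentence. Second one.")
#     # -> "First sentence.\nSecond one."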
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Return `x` split into sentences, one sentence per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 407 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
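# The expected_max_diff tolerances above are deliberately loose: these fast
# tests run on tiny random checkpoints, and the float16 save/load path (1e-1)
# tolerates far more numerical drift than the fp32 paths (1e-2 / 1e-3).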
| 716 |
"""simple docstring"""
from __future__ import annotations
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A=None ) -> Tuple:
lowerCAmelCase_ :Optional[int] = data
lowerCAmelCase_ :List[Any] = None
def __repr__( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = []
lowerCAmelCase_ :int = self
while temp:
string_rep.append(f"""{temp.data}""" )
lowerCAmelCase_ :List[str] = temp.next
return "->".join(__A )
def _snake_case ( lowercase__ : list ) -> Union[str, Any]:
'''simple docstring'''
if not elements_list:
raise Exception("""The Elements List is empty""" )
lowerCAmelCase_ :int = Node(elements_list[0] )
for i in range(1 , len(lowercase__ ) ):
lowerCAmelCase_ :Tuple = Node(elements_list[i] )
lowerCAmelCase_ :Union[str, Any] = current.next
return head
def _snake_case ( lowercase__ : Node ) -> None:
'''simple docstring'''
if head_node is not None and isinstance(lowercase__ , lowercase__ ):
print_reverse(head_node.next )
print(head_node.data )
def _snake_case ( ) -> Optional[int]:
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase_ :Union[str, Any] = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print("""Linked List:""" )
print(lowercase__ )
print("""Elements in Reverse:""" )
print_reverse(lowercase__ )
if __name__ == "__main__":
main()
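# Note: print_reverse recurses once per node, so very long lists can exceed
# Python's default recursion limit (about 1000 frames); an iterative variant
# with an explicit stack avoids that.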
| 256 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
UpperCamelCase_ : str = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
UpperCamelCase_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : Dict = image_classifier(_a , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_a ) , [
[{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """b"""}, {"""score""": 0.3_33, """label""": """c"""}],
[{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """c"""}, {"""score""": 0.3_33, """label""": """b"""}],
] , )
UpperCamelCase_ : List[str] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
] , )
@require_tf
    def test_small_model_tf(self):
UpperCamelCase_ : Union[str, Any] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
UpperCamelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : Dict = image_classifier(_a , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(_a ) , [{"""score""": 0.3_33, """label""": """a"""}, {"""score""": 0.3_33, """label""": """b"""}, {"""score""": 0.3_33, """label""": """c"""}] , )
UpperCamelCase_ : str = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
[
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
{"""score""": 0.3_33, """label""": ANY(_a )},
],
] , )
@slow
@require_torch
    def test_large_model_pt(self):
UpperCamelCase_ : int = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : Any = image_classifier(_a , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_a ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
UpperCamelCase_ : str = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf(self):
UpperCamelCase_ : Union[str, Any] = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase_ : int = image_classifier(_a , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_a ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
UpperCamelCase_ : Dict = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
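# The 0.333 scores asserted in the fast tests above are expected: the tiny
# random CLIP checkpoint yields near-uniform logits, so softmax over three
# candidate labels gives roughly 1/3 per label.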
| 208 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_snake_case, _snake_case, _snake_case = False, False, False
@dataclass
class Audio:
    """Audio [`Feature`] to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self ) -> Optional[int]:
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__SCREAMING_SNAKE_CASE = np.frombuffer(value["bytes"], dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__SCREAMING_SNAKE_CASE = np.memmap(value["path"], dtype="h", mode="r" ).astype(np.floataa ) / 3_27_67
__SCREAMING_SNAKE_CASE = BytesIO(bytes() )
sf.write(_a, _a, value["sampling_rate"], format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
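# Minimal usage sketch (assumes the `datasets` library and a local WAV file):
#
#     from datasets import Audio, Dataset
#
#     ds = Dataset.from_dict({"audio": ["path/to/file.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#     ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}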
| 693 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor into a single processor.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False) -> np.ndarray:
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
@property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
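# Minimal usage sketch (the checkpoint name is an assumption):
#
#     from transformers import SamProcessor
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")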
| 421 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : List[Any] = '▁'
_snake_case : Tuple = {'vocab_file': 'spiece.model'}
_snake_case : Optional[int] = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
_snake_case : Union[str, Any] = {
'google/reformer-crime-and-punishment': 524288,
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : Any="<unk>" , lowerCAmelCase_ : List[Any]=[] , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Any , ) -> None:
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def lowercase ( self : Any ) -> Any:
return self.sp_model.get_piece_size()
def lowercase ( self : int ) -> Dict[str, int]:
__lowerCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase_ : str ) -> str:
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self : int , lowerCAmelCase_ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : Dict ) -> Tuple:
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def lowercase ( self : Any , lowerCAmelCase_ : int ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
__lowerCAmelCase = self.sp_model.IdToPiece(lowerCAmelCase_ )
return token
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = []
__lowerCAmelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
__lowerCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def lowercase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , 'wb' ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
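# NOTE (sketch): typical round-trip through this tokenizer via the public
# class, assuming the de-obfuscated original is transformers' ReformerTokenizer
# and that the checkpoint plus the sentencepiece extra are available.
from transformers import ReformerTokenizer
tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok("Crime and Punishment")["input_ids"]
print(ids)
print(tok.decode(ids))  # recovers the input text, modulo special tokens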
| 421 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = (UnCLIPScheduler,)
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_UpperCAmelCase )
return config
def lowerCamelCase__ (self : Union[str, Any] ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> Union[str, Any]:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_UpperCAmelCase , prev_timestep=_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowercase__ = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
def lowerCamelCase__ (self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(variance_type="""learned_range""" )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=_UpperCAmelCase ) - -10.1_712_790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_UpperCAmelCase ) - -5.7_998_052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_UpperCAmelCase ) - -0.0_010_011 < 1E-5
def lowerCamelCase__ (self : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = scheduler.timesteps
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = torch.manual_seed(0 )
for i, t in enumerate(_UpperCAmelCase ):
# 1. predict noise residual
lowercase__ = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowercase__ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
lowercase__ = pred_prev_sample
lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(25 )
lowercase__ = scheduler.timesteps
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = torch.manual_seed(0 )
for i, t in enumerate(_UpperCAmelCase ):
# 1. predict noise residual
lowercase__ = model(_UpperCAmelCase , _UpperCAmelCase )
if i + 1 == timesteps.shape[0]:
lowercase__ = None
else:
lowercase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase__ = scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , prev_timestep=_UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
lowercase__ = pred_prev_sample
lowercase__ = torch.sum(torch.abs(_UpperCAmelCase ) )
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def lowerCamelCase__ (self : Optional[int] ) -> int:
"""simple docstring"""
pass
def lowerCamelCase__ (self : List[str] ) -> str:
"""simple docstring"""
pass
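# NOTE (sketch): a minimal denoising loop against the scheduler these tests
# exercise, using diffusers' public UnCLIPScheduler. A random tensor stands in
# for a trained model's noise prediction, so the output is meaningless; the
# point is the step/prev_timestep plumbing the tests above verify.
import torch
from diffusers import UnCLIPScheduler
sched = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sched.set_timesteps(25)
sample = torch.randn(1, 3, 64, 64)
gen = torch.manual_seed(0)
for i, t in enumerate(sched.timesteps):
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    prev_t = None if i + 1 == len(sched.timesteps) else sched.timesteps[i + 1]
    sample = sched.step(noise_pred, t, sample, prev_timestep=prev_t, generator=gen).prev_sample
print(sample.shape)  # torch.Size([1, 3, 64, 64])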
| 15 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__UpperCamelCase : List[Any] = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def A ( _lowercase , _lowercase ):
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , '''sklearn''' )
return (preds == labels).mean()
def A ( _lowercase , _lowercase ):
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , '''sklearn''' )
SCREAMING_SNAKE_CASE : int = simple_accuracy(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Tuple = fa_score(y_true=_lowercase , y_pred=_lowercase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def A ( _lowercase , _lowercase ):
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , '''sklearn''' )
SCREAMING_SNAKE_CASE : str = pearsonr(_lowercase , _lowercase )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = spearmanr(_lowercase , _lowercase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def A ( _lowercase , _lowercase , _lowercase ):
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , '''sklearn''' )
assert len(_lowercase ) == len(_lowercase ), f"""Predictions and labels have mismatched lengths {len(_lowercase )} and {len(_lowercase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowercase , _lowercase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "mrpc":
return acc_and_fa(_lowercase , _lowercase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowercase , _lowercase )
elif task_name == "qqp":
return acc_and_fa(_lowercase , _lowercase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(_lowercase )
def A ( _lowercase , _lowercase , _lowercase ):
warnings.warn(_lowercase , _lowercase )
requires_backends(_lowercase , '''sklearn''' )
if len(_lowercase ) != len(_lowercase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(_lowercase )} and {len(_lowercase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(_lowercase )
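# NOTE (sketch): self-contained version of the accuracy/F1 combination above.
# The de-obfuscated helpers (simple_accuracy, acc_and_f1, pearson_and_spearman,
# glue_compute_metrics) live in transformers.data.metrics; the names used
# here are illustrative only.
import numpy as np
from sklearn.metrics import f1_score
preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()
f1 = f1_score(y_true=labels, y_pred=preds)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})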
| 248 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , **A ) -> List[Any]:
super().__init__(**__UpperCamelCase )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
def __call__( self , A , **A ) -> List[str]:
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase ( self , **A ) -> Union[str, Any]:
snake_case : int = {}
if "candidate_labels" in kwargs:
snake_case : int = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
snake_case : Tuple = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase ( self , A , A=None , A="This is a sound of {}." ) -> int:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
snake_case : Dict = requests.get(__UpperCamelCase ).content
else:
with open(__UpperCamelCase , """rb""" ) as f:
snake_case : Any = f.read()
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case : int = ffmpeg_read(__UpperCamelCase , self.feature_extractor.sampling_rate )
if not isinstance(__UpperCamelCase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
snake_case : Dict = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
snake_case : List[str] = candidate_labels
snake_case : Any = [hypothesis_template.format(__UpperCamelCase ) for x in candidate_labels]
snake_case : Optional[int] = self.tokenizer(__UpperCamelCase , return_tensors=self.framework , padding=__UpperCamelCase )
snake_case : List[Any] = [text_inputs]
return inputs
def UpperCAmelCase ( self , A ) -> Any:
snake_case : Optional[Any] = model_inputs.pop("""candidate_labels""" )
snake_case : List[str] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __UpperCamelCase ):
snake_case : Optional[Any] = text_inputs[0]
else:
# Batching case.
snake_case : Any = text_inputs[0][0]
snake_case : Optional[Any] = self.model(**__UpperCamelCase , **__UpperCamelCase )
snake_case : Dict = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : Dict = model_outputs.pop("""candidate_labels""" )
snake_case : List[str] = model_outputs["""logits"""][0]
if self.framework == "pt":
snake_case : str = logits.softmax(dim=0 )
snake_case : Union[str, Any] = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
snake_case : Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__UpperCamelCase , __UpperCamelCase ) , key=lambda A : -x[0] )
]
return result
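# NOTE (sketch): how this pipeline is typically reached through the public
# API. The model id and audio file are assumptions; any CLAP-style audio-text
# checkpoint and any local wav file or URL should work.
from transformers import pipeline
classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",
)
result = classifier(
    "dog_bark.wav",  # hypothetical local file
    candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
)
print(result)  # [{'score': ..., 'label': ...}, ...] sorted by score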
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
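# NOTE (sketch): the preprocessing flow encoded above (resize -> center-crop ->
# rescale -> normalize) reproduced with plain PIL/numpy and the block's
# defaults. Class and method names above are obfuscated; in transformers the
# same flow is reached as image_processor(images=..., return_tensors=...).
# Requires Pillow >= 9.1 for Image.Resampling.
import numpy as np
from PIL import Image
image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
resized = image.resize((256, 256), resample=Image.Resampling.BICUBIC)
left = (256 - 224) // 2
cropped = resized.crop((left, left, left + 224, left + 224))
arr = np.asarray(cropped).astype(np.float32) / 255.0  # rescale by 1/255
arr = (arr - 0.5) / 0.5                               # IMAGENET_STANDARD mean/std
pixel_values = np.transpose(arr, (2, 0, 1))[None]     # channels-first batch
print(pixel_values.shape)  # (1, 3, 224, 224)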
| 684 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE_ : Dict = 1_0
def __UpperCAmelCase ( self : Tuple ,**__A : Tuple ) -> List[str]:
_lowercase = {
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**__A )
return config
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__A ,beta_end=__A )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__A )
def __UpperCAmelCase ( self : Dict ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def __UpperCAmelCase ( self : str ) -> int:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowercase = sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config(prediction_type='v_prediction' )
_lowercase = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowercase = sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps ,device=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowercase = sample.to(__A )
for t in scheduler.timesteps:
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**__A ,use_karras_sigmas=__A )
scheduler.set_timesteps(self.num_inference_steps ,device=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowercase = sample.to(__A )
for t in scheduler.timesteps:
_lowercase = scheduler.scale_model_input(__A ,__A )
_lowercase = model(__A ,__A )
_lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
_lowercase = output.prev_sample
_lowercase = torch.sum(torch.abs(__A ) )
_lowercase = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 67 |
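# NOTE (sketch): the sampling loop shape those EulerDiscreteScheduler tests
# verify, against the public diffusers API. A random tensor stands in for the
# trained UNet, so only the plumbing (scale_model_input -> step) is meaningful.
import torch
from diffusers import EulerDiscreteScheduler
sched = EulerDiscreteScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
sched.set_timesteps(10)
gen = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * sched.init_noise_sigma
for t in sched.timesteps:
    model_input = sched.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # placeholder for model(model_input, t)
    sample = sched.step(noise_pred, t, sample, generator=gen).prev_sample
print(sample.shape)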
"""simple docstring"""
a : str = range(2, 20 + 1)
a : Optional[Any] = [10**k for k in range(ks[-1] + 1)]
a : dict[int, dict[int, list[list[int]]]] = {}
def lowercase__(A , A , A , A ) ->Any:
"""simple docstring"""
lowercase__ : str= sum(a_i[j] for j in range(A , len(A ) ) )
lowercase__ : int= sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) )
lowercase__, lowercase__ : Optional[Any]= 0, 0
lowercase__ : Any= n - i
lowercase__ : Union[str, Any]= memo.get(A )
if sub_memo is not None:
lowercase__ : List[str]= sub_memo.get(A )
if jumps is not None and len(A ) > 0:
# find and make the largest jump without going over
lowercase__ : List[str]= -1
for _k in range(len(A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase__ : Any= _k
break
if max_jump >= 0:
lowercase__, lowercase__, lowercase__ : str= jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase__ : List[Any]= diff + c
for j in range(min(A , len(A ) ) ):
lowercase__, lowercase__ : Union[str, Any]= divmod(A , 10 )
if new_c > 0:
add(A , A , A )
else:
lowercase__ : Any= []
else:
lowercase__ : List[str]= {c: []}
lowercase__ : Union[str, Any]= sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase__, lowercase__ : Optional[int]= next_term(A , k - 1 , i + dn , A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase__, lowercase__ : str= compute(A , A , i + dn , A )
diff += _diff
dn += terms_jumped
lowercase__ : Optional[Any]= sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase__ : Dict= 0
while j < len(A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A , (diff, dn, k) )
return (diff, dn)
def lowercase__(A , A , A , A ) ->Optional[Any]:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(A ):
a_i.extend([0 for _ in range(k - len(A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase__ : int= i
lowercase__, lowercase__, lowercase__ : Union[str, Any]= 0, 0, 0
for j in range(len(A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase__ : Tuple= ds_c + ds_b
diff += addend
lowercase__ : List[Any]= 0
for j in range(A ):
lowercase__ : int= a_i[j] + addend
lowercase__, lowercase__ : Any= divmod(A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A , A , A )
return diff, i - start_i
def lowercase__(A , A , A ) ->Any:
"""simple docstring"""
for j in range(A , len(A ) ):
lowercase__ : List[str]= digits[j] + addend
if s >= 10:
lowercase__, lowercase__ : str= divmod(A , 10 )
lowercase__ : Optional[int]= addend // 10 + quotient
else:
lowercase__ : int= s
lowercase__ : Union[str, Any]= addend // 10
if addend == 0:
break
while addend > 0:
lowercase__, lowercase__ : str= divmod(A , 10 )
digits.append(A )
def lowercase__(A = 10**15 ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= [1]
lowercase__ : Dict= 1
lowercase__ : List[Any]= 0
while True:
lowercase__, lowercase__ : List[str]= next_term(A , 20 , i + dn , A )
dn += terms_jumped
if dn == n - i:
break
lowercase__ : int= 0
for j in range(len(A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 218 | 0 |
from __future__ import annotations
def __UpperCamelCase ( lowerCAmelCase__ : int | str ):
__a : Any = str(lowerCAmelCase__ )
return n == n[::-1]
def __UpperCamelCase ( lowerCAmelCase__ : int = 1_0_0_0_0_0_0 ):
__a : Any = 0
for i in range(1 , lowerCAmelCase__ ):
if is_palindrome(lowerCAmelCase__ ) and is_palindrome(bin(lowerCAmelCase__ ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
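# NOTE (sketch): why 585 is counted by the solution above - it is a
# palindrome in base 10 and in base 2:
n = 585
print(str(n) == str(n)[::-1])                      # True (585)
print(bin(n)[2:], bin(n)[2:] == bin(n)[2:][::-1])  # 1001001001 True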
| 326 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase__ ='true'
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=8_2 , lowerCAmelCase__ : List[str]=1_6 ):
set_seed(4_2 )
__a : Dict = RegressionModel()
__a : str = deepcopy(lowerCAmelCase__ )
__a : List[Any] = RegressionDataset(length=lowerCAmelCase__ )
__a : Tuple = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
model.to(accelerator.device )
__a , __a : Any = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
return model, ddp_model, dataloader
def __UpperCamelCase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int=False ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__a : Any = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowerCAmelCase__ : Optional[int] ):
__a : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
with accelerator.main_process_first():
__a : Any = dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__a : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ : Dict ):
if use_longest:
return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(lowerCAmelCase__ , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1_6 )
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
__a : Any = Accelerator(dispatch_batches=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
__a : List[str] = get_dataloader(lowerCAmelCase__ , not dispatch_batches )
__a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowerCAmelCase__ )
__a , __a : Tuple = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ):
__a : List[Any] = []
for batch in dataloader:
__a , __a : Optional[int] = batch.values()
with torch.no_grad():
__a : Dict = model(lowerCAmelCase__ )
__a , __a : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__a , __a : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase__ )
targs.append(lowerCAmelCase__ )
__a , __a : Tuple = torch.cat(lowerCAmelCase__ ), torch.cat(lowerCAmelCase__ )
return logits, targs
def __UpperCamelCase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : List[Any]=8_2 , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Any=1_6 ):
__a , __a , __a : List[Any] = get_basic_setup(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a , __a : str = generate_predictions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
assert (
len(lowerCAmelCase__ ) == num_samples
), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase__ )}"
def __UpperCamelCase ( lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False ):
__a : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
__a , __a : Optional[int] = get_mrpc_setup(lowerCAmelCase__ , lowerCAmelCase__ )
# First do baseline
__a , __a , __a : Any = setup['''no''']
model.to(lowerCAmelCase__ )
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase__ )
with torch.inference_mode():
__a : Union[str, Any] = model(**lowerCAmelCase__ )
__a : Tuple = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCAmelCase__ , references=batch['''labels'''] )
__a : List[Any] = metric.compute()
# Then do distributed
__a , __a , __a : Any = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__a : Optional[int] = model(**lowerCAmelCase__ )
__a : Optional[Any] = outputs.logits.argmax(dim=-1 )
__a : Union[str, Any] = batch['''labels''']
__a , __a : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
__a : Optional[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def __UpperCamelCase ( ):
__a : Optional[Any] = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__a : List[Any] = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ )
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(lowerCAmelCase__ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__a : Dict = Accelerator()
test_torch_metrics(lowerCAmelCase__ , 5_1_2 )
accelerator.state._reset_state()
def __UpperCamelCase ( lowerCAmelCase__ : int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
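# NOTE (sketch): the core pattern these tests exercise, reduced to a single
# process. gather_for_metrics collects tensors from all processes and, when
# the dataloader was padded to a multiple of the process count, drops the
# duplicated samples so metrics see exactly the dataset.
import torch
from accelerate import Accelerator
accelerator = Accelerator()
preds = torch.tensor([0, 1, 1, 0])
refs = torch.tensor([0, 1, 0, 0])
preds, refs = accelerator.gather_for_metrics((preds, refs))
print((preds == refs).float().mean().item())  # 0.75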
| 326 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=30 , _A=2 , _A=3 , _A=True , _A=True , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=10 , _A=0.0_2 , _A=None , _A=2 , ):
__A : str = parent
__A : int = batch_size
__A : Optional[Any] = image_size
__A : str = patch_size
__A : str = num_channels
__A : int = is_training
__A : Optional[int] = use_labels
__A : Optional[int] = hidden_size
__A : int = num_hidden_layers
__A : Dict = num_attention_heads
__A : int = intermediate_size
__A : List[str] = hidden_act
__A : Any = hidden_dropout_prob
__A : Tuple = attention_probs_dropout_prob
__A : int = type_sequence_label_size
__A : List[Any] = initializer_range
__A : Optional[int] = scope
__A : Tuple = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__A : int = (image_size // patch_size) ** 2
__A : Optional[Any] = num_patches + 1
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : str = None
if self.use_labels:
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : List[str] = ViTModel(config=a_ )
model.to(a_ )
model.eval()
__A : Any = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Union[str, Any] = ViTForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
__A : Union[str, Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A : List[Any] = 1
__A : int = ViTForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
__A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Any = self.type_sequence_label_size
__A : List[Any] = ViTForImageClassification(a_ )
model.to(a_ )
model.eval()
__A : str = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A : List[str] = 1
__A : str = ViTForImageClassification(a_ )
model.to(a_ )
model.eval()
__A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : str = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.prepare_config_and_inputs()
(
__A
) : str = config_and_inputs
__A : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _A( _A , _A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase : Optional[int] = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Tuple = True
UpperCamelCase : Any = False
UpperCamelCase : Dict = False
UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase_ ( self ):
__A : str = ViTModelTester(self )
__A : Dict = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Any = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = model_class(a_ )
__A : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Any = [*signature.parameters.keys()]
__A : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def UpperCAmelCase_ ( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCAmelCase_ ( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def UpperCAmelCase_ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Union[str, Any] = ViTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__A : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
__A : int = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a_ )
__A : Tuple = self.default_image_processor
__A : List[str] = prepare_img()
__A : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
__A : int = model(**a_ )
# verify the logits
__A : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
__A : Any = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
__A : Optional[Any] = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a_ )
__A : int = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
__A : Union[str, Any] = prepare_img()
__A : List[Any] = image_processor(images=a_ , return_tensors='pt' )
__A : List[Any] = inputs.pixel_values.to(a_ )
# forward pass
with torch.no_grad():
__A : Tuple = model(a_ , interpolate_pos_encoding=a_ )
# verify the logits
__A : Optional[int] = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a_ )
__A : List[Any] = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ):
__A : Dict = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
__A : Union[str, Any] = self.default_image_processor
__A : Tuple = prepare_img()
__A : Any = image_processor(images=a_ , return_tensors='pt' )
__A : Optional[Any] = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__A : Dict = model(a_ )
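# NOTE (sketch): the inference path the slow tests above cover, through the
# public API. Assumes network access for the checkpoint and the COCO image.
import requests
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "Egyptian cat"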
| 239 |
'''simple docstring'''
def __A ( a_ : list[list[float]] ):
lowerCAmelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(a_ ):
if len(a_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a_ ) )
return data_lists
def __A ( a_ : list[list[float]] ,a_ : list[int] ):
lowerCAmelCase : list[list[float]] = []
for dlist, weight in zip(a_ ,a_ ):
lowerCAmelCase : Optional[Any] = min(a_ )
lowerCAmelCase : List[str] = max(a_ )
lowerCAmelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
lowerCAmelCase : List[Any] = f'''Invalid weight of {weight:f} provided'''
raise ValueError(a_ )
score_lists.append(a_ )
return score_lists
def __A ( a_ : list[list[float]] ):
lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a_ ):
lowerCAmelCase : Optional[Any] = final_scores[j] + ele
return final_scores
def __A ( a_ : list[list[float]] ,a_ : list[int] ):
lowerCAmelCase : Union[str, Any] = get_data(a_ )
lowerCAmelCase : List[Any] = calculate_each_score(a_ ,a_ )
lowerCAmelCase : Optional[Any] = generate_final_scores(a_ )
# append scores to source data
for i, ele in enumerate(a_ ):
source_data[i].append(a_ )
return source_data
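# NOTE (sketch): the min-max scoring implemented above, in miniature. Weight 1
# rewards high values, weight 0 rewards low values, and per-criterion scores
# are summed per row; higher totals win. (Function names above are obfuscated;
# in TheAlgorithms the entry point is procentual_proximity(source_data, weights).)
data = [[20.0, 60.0], [80.0, 70.0], [50.0, 90.0]]
weights = [0, 1]  # minimize criterion 0, maximize criterion 1
totals = [0.0] * len(data)
for col, w in zip(zip(*data), weights):
    lo, hi = min(col), max(col)
    for i, v in enumerate(col):
        norm = (v - lo) / (hi - lo) if hi != lo else 0.0
        totals[i] += norm if w == 1 else 1 - norm
print(totals)  # [1.0, 0.333..., 1.5]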
| 525 | 0 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__UpperCAmelCase = logging.getLogger(__name__)
__UpperCAmelCase = 'Hello world! cécé herlolip'
__UpperCAmelCase = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__UpperCamelCase , large=__UpperCamelCase , share_emb=__UpperCamelCase , use_bert_emb=__UpperCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
UpperCAmelCase__ : Optional[Any] = torch.load(__UpperCamelCase , lambda __UpperCamelCase , __UpperCamelCase : storage )
UpperCAmelCase__ : int = AbsSummarizer(__UpperCamelCase , torch.device("""cpu""" ) , __UpperCamelCase )
original.eval()
UpperCAmelCase__ : Dict = BertAbsSummarizer(__UpperCamelCase , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase__ : Any = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase__ : List[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__UpperCamelCase )) )
UpperCAmelCase__ : Tuple = torch.tensor(__UpperCamelCase ).unsqueeze(0 )
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__UpperCamelCase )) )
UpperCAmelCase__ : str = torch.tensor(__UpperCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ : Tuple = encoder_input_ids
UpperCAmelCase__ : Optional[int] = decoder_input_ids
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Dict = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ : Any = original(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )[0]
UpperCAmelCase__ : Tuple = original.generator(__UpperCamelCase )
UpperCAmelCase__ : Tuple = new_model(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )[0]
UpperCAmelCase__ : Optional[Any] = new_model.generator(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__UpperCamelCase ) )
UpperCAmelCase__ : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
__UpperCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
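# NOTE (sketch): the verification idiom the script above relies on - feed both
# implementations the same inputs and compare the largest elementwise
# deviation - in isolation:
import torch
def max_abs_diff(a: torch.Tensor, b: torch.Tensor) -> float:
    return torch.max(torch.abs(a - b)).item()
x = torch.randn(2, 4)
linear = torch.nn.Linear(4, 4)
print(max_abs_diff(linear(x), linear(x)))               # 0.0: identical paths
print(torch.allclose(linear(x), linear(x), atol=1e-3))  # True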
| 194 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__UpperCAmelCase = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class __lowercase ( tr.AbstractTransform ):
def __init__( self : List[str] ,A : str = " " ):
'''simple docstring'''
UpperCAmelCase__ : str = sentence_delimiter
def __lowercase ( self : Union[str, Any] ,A : str ):
'''simple docstring'''
return list(A )
def __lowercase ( self : int ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
for sent_idx, sentence in enumerate(A ):
chars.extend(self.process_string(A ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(A ) - 1:
chars.append(self.sentence_delimiter )
return chars
__UpperCAmelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__UpperCAmelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__UpperCAmelCase = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__UpperCAmelCase = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__UpperCAmelCase = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def __lowercase ( self : int ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/jitsi/jiwer/"""] ,reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] ,)
def __lowercase ( self : Union[str, Any] ,A : Optional[Any] ,A : Tuple ,A : str=False ):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
A ,A ,truth_transform=A ,hypothesis_transform=A ,)["wer"]
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Any = 0
for prediction, reference in zip(A ,A ):
UpperCAmelCase__ : List[str] = jiwer.compute_measures(
A ,A ,truth_transform=A ,hypothesis_transform=A ,)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
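# NOTE (sketch): computing CER through this metric via the datasets API.
# Assumes an older datasets release; load_metric was later removed in favor
# of the separate evaluate library (evaluate.load("cer")). Requires jiwer.
import datasets
cer = datasets.load_metric("cer")
score = cer.compute(
    predictions=["this is the prediction", "there is an other sample"],
    references=["this is the reference", "there is another one"],
)
print(score)  # ~0.3415, matching the docstring example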
| 194 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def UpperCamelCase ( _a ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def UpperCamelCase ( _a , _a , _a ) -> np.ndarray:
'''simple docstring'''
lowercase_ :int = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(_a , _a )
# Predict target for test data
lowercase_ :Tuple = xgb.predict(_a )
lowercase_ :List[str] = predictions.reshape(len(_a ) , 1 )
return predictions
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :Union[str, Any] = fetch_california_housing()
lowercase_ , lowercase_ :int = data_handling(_a )
lowercase_ , lowercase_ , lowercase_ , lowercase_ :int = train_test_split(
_a , _a , test_size=0.25 , random_state=1 )
lowercase_ :Union[str, Any] = xgboost(_a , _a , _a )
# Error printing
print(f"Mean Absolute Error : {mean_absolute_error(_a , _a )}" )
print(f"Mean Square Error : {mean_squared_error(_a , _a )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
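# NOTE (sketch): a quick baseline for the pipeline above - predicting the
# training-set mean - to put the XGBoost errors in context. Downloads the
# California housing data on first use.
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
housing = fetch_california_housing()
x_train, x_test, y_train, y_test = train_test_split(
    housing["data"], housing["target"], test_size=0.25, random_state=1
)
baseline = np.full_like(y_test, y_train.mean())
print(f"Baseline MAE : {mean_absolute_error(y_test, baseline)}")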
| 257 |
from typing import Any
import numpy as np
def UpperCamelCase ( _a ) -> bool:
'''simple docstring'''
return np.array_equal(_a , matrix.conjugate().T )
def UpperCamelCase ( _a , _a ) -> Any:
'''simple docstring'''
lowercase_ :str = v.conjugate().T
lowercase_ :int = v_star.dot(_a )
assert isinstance(_a , np.ndarray )
return (v_star_dot.dot(_a )) / (v_star.dot(_a ))
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :str = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase_ :Optional[int] = np.array([[1], [2], [3]] )
assert is_hermitian(_a ), f"{a} is not hermitian."
print(rayleigh_quotient(_a , _a ) )
lowercase_ :Any = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_a ), f"{a} is not hermitian."
assert rayleigh_quotient(_a , _a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
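# NOTE (sketch): the quantity computed above is the Rayleigh quotient,
#   R(A, v) = (v* A v) / (v* v),
# which for a Hermitian matrix A is real and bounded by the extreme
# eigenvalues: lambda_min(A) <= R(A, v) <= lambda_max(A). A quick check on
# the same matrix used in tests():
import numpy as np
A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
r = (v.conjugate().T @ A @ v) / (v.conjugate().T @ v)
eigs = np.linalg.eigvalsh(A)
print(eigs.min() <= r.real.item() <= eigs.max())  # True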
| 257 | 1 |
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
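# A standalone usage sketch (`unet` is a hypothetical epsilon-predicting model; the tests above
# exercise exactly this scale_model_input -> model -> step loop):
# scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(model_input, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample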
| 162 |
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
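# A usage sketch following the "adding a new pipeline" pattern (the checkpoint name is an
# assumption for illustration):
# from transformers import AutoModelForSequenceClassification, pipeline
# from transformers.pipelines import PIPELINE_REGISTRY
#
# PIPELINE_REGISTRY.register_pipeline(
#     "pair-classification",
#     pipeline_class=PairClassificationPipeline,
#     pt_model=AutoModelForSequenceClassification,
# )
# classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
# print(classifier("I like you.", second_text="I love you."))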
| 162 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
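# A quick note on the pattern above: `_LazyModule` defers the heavy torch/TF imports until an
# attribute is first accessed, so user code only pays for what it touches (a usage sketch):
# from transformers import Speech2TextProcessor            # cheap: no torch import yet
# from transformers import Speech2TextForConditionalGeneration  # triggers the torch import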
| 205 |
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
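        # A companion sketch (an assumption, not part of the test): greedy decoding with the
        # same checkpoint to inspect the model's actual text output.
        # generated_ids = model.generate(input_ids, max_new_tokens=8)
        # print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))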
| 205 | 1 |
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
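# A minimal end-user sketch of the pipeline under test (the image URL comes from the tests above):
# detector = pipeline("zero-shot-object-detection")
# detections = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
# )
# for d in detections:
#     print(f"{d['label']}: {d['score']:.2f} at {d['box']}")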
| 317 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 317 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures


logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
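# A usage sketch (the checkpoint and data path are assumptions for illustration):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
# print(len(train_dataset), train_dataset.get_labels())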
| 618 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
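# A tiny usage sketch: build a config and inspect the adapter-specific fields added by X-MOD.
# config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
# print(config.model_type, config.adapter_reduction_factor, config.languages)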
| 618 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 66 |
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 66 | 1 |
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


# PartitionSpec for GPT-Neo: replicate the hidden dim and shard feed-forward and head dim
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
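# A usage sketch (the model choice and parameter handling are assumptions): map every
# parameter of a Flax GPT-Neo checkpoint to a PartitionSpec for model-parallel execution.
# from transformers import FlaxGPTNeoForCausalLM
# model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
# param_specs = set_partitions(model.params)  # frozen dict of PartitionSpec / None leaves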
| 509 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 466 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.

    This module is a wrapper for multiple instances of the `ControlNetModel`. The `forward()` API is designed to be
    compatible with `ControlNetModel`.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Union[str, os.PathLike], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
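# A usage sketch (checkpoint names are assumptions): wrap two ControlNets so a pipeline can
# condition on canny edges and depth maps at the same time.
# canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
# multi_controlnet = MultiControlNetModel([canny, depth])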
| 339 |
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint's weights into our Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
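    # Example invocation (file paths are placeholders; flags are defined by the parser above):
    #   python convert_blenderbot_original_checkpoint.py --src_path blenderbot-model.bin \
    #       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json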
| 339 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ConvBERT model."""

    model_type = "convbert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
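
# A minimal usage sketch for the configuration above, assuming the released
# `transformers` package is installed (values printed are the defaults from
# the __init__ signature, used here purely for illustration):
from transformers import ConvBertConfig as _HFConvBertConfig

_cfg = _HFConvBertConfig(conv_kernel_size=9, head_ratio=2)
print(_cfg.model_type)           # "convbert"
print(_cfg.num_attention_heads)  # 12 by default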
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and the matching tokenizer) defined by config_name to save_dir."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
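
# Usage sketch: `fire.Fire` exposes the function as a CLI, so (the script
# name, model name and paths below are illustrative, not from the original):
#
#   python save_randomly_initialized.py t5-small /tmp/rand-t5 --d_model=64
#
# or equivalently from Python:
#
#   save_randomly_initialized_version("t5-small", "/tmp/rand-t5", d_model=64)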
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
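
# A usage sketch for the fast tokenizer above (it downloads the
# unc-nlp/lxmert-base-uncased files, so it is left commented out; the 0/1
# segment layout comes from create_token_type_ids_from_sequences):
#
#   from transformers import LxmertTokenizerFast
#
#   tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tok("a question", "a context", return_token_type_ids=True)
#   print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second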
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n      title={Evaluating Large Language Models Trained on Code},\n      author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n      year={2021},\n      eprint={2107.03374},\n      archivePrefix={arXiv},\n      primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good the predictions are, given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
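
# Worked example for the estimator above: with n = 5 samples per task of which
# c = 2 pass, pass@2 = 1 - C(n-c, 2) / C(n, 2) = 1 - 3/10 = 0.7. The check
# below assumes it runs inside this module (numpy is already imported as np).
assert abs(estimate_pass_at_k(np.array([5]), np.array([2]), 2)[0] - 0.7) < 1e-9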
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wraps the token-id sequences used for distillation and cleans them up on construction."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
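
# A minimal construction sketch for the dataset above. The `params` namespace
# normally comes from the distillation training script's argparse; only the
# attributes this class actually reads are filled in, the token ids are made
# up, and the local `utils` module (for `logger`) must be importable:
#
#   from types import SimpleNamespace
#   import numpy as np
#
#   params = SimpleNamespace(
#       max_model_input_size=512,
#       mlm=True,
#       special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
#       is_master=True,
#   )
#   data = [np.array([101] + [7, 8, 9] * 6 + [102])]  # CLS ... SEP, length > 11
#   dataset = LmSeqsDataset(params, data)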
from bisect import bisect
from itertools import accumulate


def fractional_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: vl = values, wt = weights, w = capacity, n = number of items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
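
# Worked example: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# The greedy order by value/weight takes items 1 and 2 whole (weight 30,
# value 160) plus 20/30 of item 3 (value 80), for a total of 240:
#
#   assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240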
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase = 1 , UpperCamelCase = 1000 ) -> int:
"""simple docstring"""
__UpperCAmelCase : int = 1
__UpperCAmelCase : Tuple = 0
for divide_by_number in range(UpperCamelCase , digit + 1 ):
__UpperCAmelCase : list[int] = []
__UpperCAmelCase : int = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(UpperCamelCase ):
__UpperCAmelCase : Any = len(UpperCamelCase )
__UpperCAmelCase : Optional[int] = divide_by_number
else:
has_been_divided.append(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
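
# Worked example: among 1/d for d < 10, 1/7 = 0.(142857) has the longest
# recurring remainder cycle (six remainders before repeating), so
# solution(1, 10) returns 7; for d < 1000 the answer is 983:
#
#   assert solution(1, 10) == 7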
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = """▁"""
A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
A = {
"""google/pegasus-xsum""": 512,
}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = PegasusTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_):
raise TypeError(
F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is"
F" {type(UpperCamelCase_)}")
__UpperCAmelCase : Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1)
]
if len(set(UpperCamelCase_)) != len(UpperCamelCase_):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
__UpperCAmelCase : str = additional_special_tokens_extended
else:
__UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)]
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = vocab_file
__UpperCAmelCase : List[str] = False if not self.vocab_file else True
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")
return [1 if x in all_special_ids else 0 for x in seq]
def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase_)
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase_) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(UpperCamelCase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_):
copyfile(self.vocab_file , UpperCamelCase_)
return (out_vocab_file,)
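# Usage sketch for the special-token mask above (downloads google/pegasus-xsum,
# so it is left commented out; the mask flags EOS plus the reserved
# <mask_*>/<unk_*> block):
#
#   from transformers import PegasusTokenizerFast
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("Gap sentences use <mask_1>.").input_ids
#   print(tok.get_special_tokens_mask(ids, already_has_special_tokens=True))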
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
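
# Example invocation (the script name and paths are illustrative):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/model.pt /tmp/mbart-hf --hf_config facebook/mbart-large-cc25
#
# or directly from Python:
#
#   model = convert_fairseq_mbart_checkpoint_from_disk("/path/to/model.pt")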
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a trainable low-rank adapter (used for testing only)."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_lowerCamelCase : Any = """bigscience/bloom-1b7"""
# Constant values
_lowerCamelCase : Optional[int] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
_lowerCamelCase : str = """Hello my name is"""
_lowerCamelCase : List[Any] = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
_lowerCamelCase : List[Any] = 10
def lowercase ( self : Dict ):
# Models and tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : str ):
super().setUp()
# Models and tokenizer
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
def lowercase ( self : Any ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.model_abit.config
self.assertTrue(hasattr(snake_case_ , "quantization_config" ) )
_UpperCAmelCase = config.to_dict()
_UpperCAmelCase = config.to_diff_dict()
_UpperCAmelCase = config.to_json_string()
def lowercase ( self : Optional[Any] ):
from bitsandbytes.nn import Paramsabit
_UpperCAmelCase = self.model_fpaa.get_memory_footprint()
_UpperCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
_UpperCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase ( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(snake_case_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_UpperCAmelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
def lowercase ( self : Tuple ):
_UpperCAmelCase = BitsAndBytesConfig()
_UpperCAmelCase = True
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_UpperCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
def lowercase ( self : List[str] ):
with self.assertRaises(snake_case_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase = BitsAndBytesConfig()
with self.assertRaises(snake_case_ ):
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case_ , load_in_abit=snake_case_ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def lowercase ( self : List[Any] ):
with self.assertRaises(snake_case_ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(snake_case_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(snake_case_ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(snake_case_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(snake_case_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_UpperCAmelCase = self.model_fpaa.to(torch.floataa )
_UpperCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
_UpperCAmelCase = self.model_fpaa.to("cpu" )
# Check this does not throw an error
_UpperCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
_UpperCAmelCase = self.model_fpaa.float()
def lowercase ( self : str ):
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=snake_case_ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
@classmethod
def lowercase ( cls : List[Any] ):
_UpperCAmelCase = "t5-small"
_UpperCAmelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
_UpperCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
_UpperCAmelCase = "Translate in German: Hello, my dog is cute"
def lowercase ( self : Dict ):
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Any ):
from transformers import TaForConditionalGeneration
_UpperCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
_UpperCAmelCase = None
# test with `t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
# test with `flan-t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
_UpperCAmelCase = modules
def lowercase ( self : Any ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
# test with `flan-t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : List[str] ):
super().setUp()
# model_name
_UpperCAmelCase = "bigscience/bloom-560m"
_UpperCAmelCase = "t5-small"
# Different types of model
_UpperCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# Sequence classification model
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# CausalLM model
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# Seq2seq model
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=snake_case_ , device_map="auto" )
def lowercase ( self : Dict ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Tuple ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Union[str, Any] ):
super().setUp()
def lowercase ( self : Optional[int] ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_UpperCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Tuple ):
super().setUp()
def lowercase ( self : List[Any] ):
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=snake_case_ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
_UpperCAmelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Dict ):
_UpperCAmelCase = "facebook/opt-350m"
super().setUp()
def lowercase ( self : Dict ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
_UpperCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_UpperCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(snake_case_ ) ):
_UpperCAmelCase = LoRALayer(module.q_proj , rank=1_6 )
_UpperCAmelCase = LoRALayer(module.k_proj , rank=1_6 )
_UpperCAmelCase = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
_UpperCAmelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_UpperCAmelCase = model.forward(**snake_case_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(snake_case_ , snake_case_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(snake_case_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : int = """gpt2-xl"""
_lowerCamelCase : str = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
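
# A minimal 4-bit loading sketch mirroring what the tests above exercise.
# It needs a CUDA GPU plus the accelerate and bitsandbytes packages, and it is
# left commented out; the model name is just the small checkpoint these tests
# themselves use:
#
#   import torch
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-560m", quantization_config=quant_config, device_map="auto"
#   )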
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A number is a Krishnamurthy number if it equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
    )
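
# Worked example: 145 is a Krishnamurthy (strong/factorion) number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, while 146 is not (1 + 24 + 720 = 745):
#
#   assert krishnamurthy(145) and not krishnamurthy(146)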
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCamelCase__ :
__lowerCamelCase = OPTConfig
__lowerCamelCase = {}
__lowerCamelCase = """gelu"""
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Dict=13 , __a : Dict=7 , __a : Optional[Any]=True , __a : Any=False , __a : Tuple=99 , __a : Optional[int]=16 , __a : Any=2 , __a : Optional[Any]=4 , __a : Union[str, Any]=4 , __a : Tuple="gelu" , __a : Optional[int]=0.1 , __a : int=0.1 , __a : List[Any]=20 , __a : Tuple=2 , __a : str=1 , __a : str=0 , __a : List[Any]=16 , __a : Optional[Any]=16 , ):
'''simple docstring'''
lowerCamelCase__: List[str] = parent
lowerCamelCase__: List[str] = batch_size
lowerCamelCase__: Dict = seq_length
lowerCamelCase__: List[str] = is_training
lowerCamelCase__: Dict = use_labels
lowerCamelCase__: Union[str, Any] = vocab_size
lowerCamelCase__: Union[str, Any] = hidden_size
lowerCamelCase__: Any = num_hidden_layers
lowerCamelCase__: Union[str, Any] = num_attention_heads
lowerCamelCase__: Tuple = intermediate_size
lowerCamelCase__: Optional[int] = hidden_act
lowerCamelCase__: Union[str, Any] = hidden_dropout_prob
lowerCamelCase__: str = attention_probs_dropout_prob
lowerCamelCase__: List[str] = max_position_embeddings
lowerCamelCase__: Tuple = eos_token_id
lowerCamelCase__: Any = pad_token_id
lowerCamelCase__: str = bos_token_id
lowerCamelCase__: Optional[int] = embed_dim
lowerCamelCase__: Union[str, Any] = word_embed_proj_dim
lowerCamelCase__: List[Any] = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase__: Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase__: Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase__: Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__a , **self.config_updates , )
lowerCamelCase__: Optional[Any] = prepare_opt_inputs_dict(__a , __a )
return config, inputs_dict
def lowerCamelCase_ ( self : str , __a : Optional[Any] , __a : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = TFOPTModel(config=__a )
lowerCamelCase__: Optional[Any] = inputs_dict["""input_ids"""]
lowerCamelCase__: Dict = input_ids[:1, :]
lowerCamelCase__: Any = inputs_dict["""attention_mask"""][:1, :]
lowerCamelCase__: Any = 1
# first forward pass
lowerCamelCase__: str = model(__a , attention_mask=__a , use_cache=__a )
lowerCamelCase__ , lowerCamelCase__: Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__: Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__: Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase__: str = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase__: int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase__: Any = model(__a , attention_mask=__a )[0]
lowerCamelCase__: Any = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase__: str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase__: Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__: List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
@require_tf
class lowerCamelCase__ ( A__ , A__ , unittest.TestCase ):
__lowerCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__lowerCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = 10
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__: int = TFOPTModelTester(self )
lowerCamelCase__: Tuple = ConfigTester(self , config_class=__a )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: str = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__a : Optional[int] , __a : Dict ):
if hasattr(__a , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__a , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
lowerCamelCase__: int = model_class(config=__a )
lowerCamelCase__: Tuple = _get_word_embedding_weight(__a , model.get_input_embeddings() )
lowerCamelCase__: Optional[int] = _get_word_embedding_weight(__a , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__a )
lowerCamelCase__: str = _get_word_embedding_weight(__a , model.get_input_embeddings() )
lowerCamelCase__: List[str] = _get_word_embedding_weight(__a , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase__: Any = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __a )
# check that weights remain the same after resizing
lowerCamelCase__: Optional[Any] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase__: Any = False
self.assertTrue(__a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __a )
lowerCamelCase__: List[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCamelCase__: List[Any] = False
self.assertTrue(__a )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
__lowerCamelCase = 99
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: int = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCamelCase__: Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCamelCase__: Any = input_ids.shape[0]
lowerCamelCase__: List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: int = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
lowerCamelCase__: List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowerCamelCase__: Optional[Any] = tf.not_equal(__a , model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase__: str = model(input_ids=__a , attention_mask=__a ).last_hidden_state
lowerCamelCase__: str = (1, 11, 512)
self.assertEqual(output.shape , __a )
lowerCamelCase__: str = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-3 ) )
lowerCamelCase__: Optional[int] = tf.function(__a , jit_compile=__a )
lowerCamelCase__: List[Any] = xla_generate(__a , __a )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __a , atol=4e-2 ) )
@require_tf
@slow
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
lowerCamelCase__: List[Any] = """facebook/opt-350m"""
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__: Dict = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase__: Dict = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase__: Union[str, Any] = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase__: Union[str, Any] = tokenizer(__a , return_tensors="""tf""" , padding=__a , add_special_tokens=__a )
lowerCamelCase__: Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCamelCase__: Dict = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__a , __a , atol=1e-4 ) )
lowerCamelCase__: Any = tf.function(__a , jit_compile=__a )
lowerCamelCase__: List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__a , __a , atol=1e-4 ) )
@require_tf
import unittest

import tensorflow as tf

from transformers import GPT2Tokenizer, TFOPTForCausalLM
from transformers.testing_utils import slow


@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        expected_outputs = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        expected_outputs = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)
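if __name__ == "__main__":
    # Hedged usage sketch (not part of the test suite): left padding is what
    # makes batched generation match per-sentence generation for decoder-only
    # models such as OPT, since new tokens are appended on the right.
    tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
    tokenizer.padding_side = "left"
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
    generated = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))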
| 306 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
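if __name__ == "__main__":
    # Hedged standalone demo of the toy BPE scheme above: with merges "l o",
    # "lo w" and "e r</w>", "lower" splits into "low" + "er</w>"; any unseen
    # symbol falls back to "<unk>". Writes the same files to a temp dir.
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        demo_vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
                      "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
        vocab_file = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(tmp, VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w") as fp:
            fp.write(json.dumps(dict(zip(demo_vocab, range(len(demo_vocab))))))
        with open(merges_file, "w") as fp:
            fp.write("\n".join(["l o 123", "lo w 1456", "e r</w> 1789", ""]))
        demo_tokenizer = XLMTokenizer(vocab_file, merges_file)
        print(demo_tokenizer.tokenize("lower"))  # expected: ['low', 'er</w>']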
| 712 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 381 | 0 |
from ..utils import DummyObject, requires_backends


class A__(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def UpperCAmelCase__(*args, **kwargs):
    """simple docstring"""
    requires_backends(UpperCAmelCase__, ["torch"])
| 550 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
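if __name__ == "__main__":
    # Hedged usage sketch: Flax modules are initialized lazily from example
    # inputs. Shapes below are illustrative only (NHWC layout, as the blocks
    # above assume); the time embedding width (128) is an arbitrary choice.
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64, dropout_prob=0.0)
    hidden_states = jnp.zeros((1, 8, 8, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(rng, hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    print(out.shape)  # (1, 8, 8, 64)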
| 620 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
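if __name__ == "__main__":
    # Hedged direct-use sketch of the helper functions above, bypassing the
    # datasets.Metric wrapper; inputs are plain numpy arrays.
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    print(simple_accuracy(preds, labels))  # 0.75
    print(acc_and_f1(preds, labels))       # accuracy 0.75, f1 ~0.67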
| 143 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
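if __name__ == "__main__":
    # Hedged cross-check against the classical truth table. Qiskit reports
    # count strings with classical bit 1 first, so each key reads
    # "<carry><sum>": bit 1 is AND (carry), bit 0 is XOR (sum).
    for bit0 in (0, 1):
        for bit1 in (0, 1):
            result = half_adder(bit0, bit1)
            expected = f"{bit0 & bit1}{bit0 ^ bit1}"
            assert max(result, key=result.get) == expected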
| 143 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 359 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # CANINE has no vocab file.
        return ()
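if __name__ == "__main__":
    # Hedged usage sketch: CANINE is character level, so token ids are raw
    # Unicode codepoints wrapped in the CLS / SEP pseudo-characters above.
    tokenizer = CanineTokenizer()
    ids = tokenizer("hello")["input_ids"]
    assert ids == [CLS] + [ord(c) for c in "hello"] + [SEP]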
| 511 | 0 |
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
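if __name__ == "__main__":
    # Worked examples: 25 = 0b011001 and 32 = 0b100000 share no set bits,
    # while 37 = 0b100101 and 50 = 0b110010 share only bit 5.
    print(binary_and(25, 32))  # 0b000000
    print(binary_and(37, 50))  # 0b100000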
| 498 |
| 498 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 434 |
"""simple docstring"""

MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Calculate the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
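if __name__ == "__main__":
    # Sanity check against the well-known Adler-32 test vector:
    # adler32("Wikipedia") == 0x11E60398 == 300286872.
    assert adler32("Wikipedia") == 300286872
    print(hex(adler32("Wikipedia")))  # 0x11e60398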
| 434 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "encoder.layernorm.weight"
    if name == "encoder.norm.bias":
        name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into separate query/key/value tensors
            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
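# Hedged invocation example (the script filename and output path below are
# placeholders, not taken from this excerpt):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa-converted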
| 96 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
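if __name__ == "__main__":
    # Sanity check both implementations against the standard library.
    import math

    for x, y in [(48, 18), (270, 192), (7, 13)]:
        assert euclidean_gcd(x, y) == math.gcd(x, y)
        assert euclidean_gcd_recursive(x, y) == math.gcd(x, y)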
| 96 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 129 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to give every node exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
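if __name__ == "__main__":
    # Worked example (LeetCode 979-style): the tree [3, 0, 0] needs 2 moves,
    # one coin from the root to each leaf.
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2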
| 78 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, only on the main process (or via xm.save on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set (upper-cased) environment variables for the duration of the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for an object, class or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Check whether a local port (default: 29500, the torch.distributed default) is in use."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
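if __name__ == "__main__":
    # Hedged usage sketch for patch_environment: the variable name is
    # upper-cased, visible inside the block, and removed afterwards.
    with patch_environment(omp_num_threads=1):
        assert os.environ["OMP_NUM_THREADS"] == "1"
    assert "OMP_NUM_THREADS" not in os.environ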
| 465 |
'''simple docstring'''
def A_ ( snake_case = 100 ):
SCREAMING_SNAKE_CASE:Dict = 0
SCREAMING_SNAKE_CASE:Optional[int] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
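# The loop above is O(n); the same difference follows in O(1) from the closed
# forms (n(n+1)/2)**2 and n(n+1)(2n+1)/6. The helper name is an illustrative
# addition, not part of the original task.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    assert solution_closed_form(10) == 2640  # worked example: 55**2 - 385
    assert solution_closed_form() == solution()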
| 465 | 1 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = tmp_path / """cache"""
UpperCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase : Tuple = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = tmp_path / """cache"""
UpperCamelCase : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCamelCase : str = (
Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase : Optional[int] = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = tmp_path / """cache"""
UpperCamelCase : Optional[int] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
UpperCamelCase : Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase : Tuple = (
Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase : int = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCamelCase : str = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
UpperCamelCase : Tuple = features.copy()
UpperCamelCase : Tuple = (
Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase : Optional[int] = tmp_path / """cache"""
UpperCamelCase : List[Any] = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = tmp_path / """cache"""
UpperCamelCase : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Dict = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = [jsonl_path]
UpperCamelCase : List[Any] = tmp_path / """cache"""
UpperCamelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Optional[int] = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ):
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
UpperCamelCase : Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = tmp_path / """cache"""
UpperCamelCase : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase : Any = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[str] = tmp_path / """cache"""
UpperCamelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase : Optional[Any] = (
Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase : Optional[Any] = JsonDatasetReader({"""train""": jsonl_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if split:
UpperCamelCase : List[Any] = {split: jsonl_path}
else:
UpperCamelCase : Tuple = """train"""
UpperCamelCase : Union[str, Any] = {"""train""": jsonl_path, """test""": jsonl_path}
UpperCamelCase : Optional[Any] = tmp_path / """cache"""
UpperCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Union[str, Any] = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return json.load(SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return [json.loads(SCREAMING_SNAKE_CASE ) for line in buffer]
class lowercase__ :
"""simple docstring"""
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A ).write()
buffer.seek(0 )
UpperCamelCase : Optional[Any] = load_json_function(_A )
assert isinstance(_A , _A )
assert isinstance(exported_content[0] , _A )
assert len(_A ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _a ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A , orient=_A ).write()
buffer.seek(0 )
UpperCamelCase : str = load_json(_A )
assert isinstance(_A , _A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(_A ) == 1_0
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase : List[Any] = load_json_function(_A )
assert isinstance(_A , _A )
assert isinstance(exported_content[0] , _A )
assert len(_A ) == 1_0
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _a ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , lines=_A , orient=_A , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase : Dict = load_json(_A )
assert isinstance(_A , _A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(_A ) == 1_0
def _a ( self , _A ):
'''simple docstring'''
with pytest.raises(_A ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_A , _A , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def _a ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
UpperCamelCase : Tuple = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_A , _A , compression=_A ).write()
with fsspec.open(_A , """rb""" , compression="""infer""" ) as f:
UpperCamelCase : Dict = f.read()
with fsspec.open(_A , """rb""" , compression="""infer""" ) as f:
UpperCamelCase : Tuple = f.read()
assert exported_content == original_content
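# Outside the test suite, the same reader/writer pair is normally reached via
# the public datasets API; a minimal round-trip sketch (assumes `datasets` is
# installed and writes a file into the working directory):
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_json("roundtrip.jsonl", lines=True)

reloaded = load_dataset("json", data_files="roundtrip.jsonl", split="train")
assert reloaded.column_names == ["col_1", "col_2"]
assert reloaded.num_rows == 2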
| 102 |
def interpolation_search(sorted_collection, item):
    """Searches an ascending sorted collection; returns the index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; call with left=0 and right=len(sorted_collection)-1."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )
def __assert_sorted(collection):
    """Raises ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
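# A randomized cross-check of the iterative search (illustrative): with the
# demo data above, the first probe for 67 lands at index
# 0 + (67 - 10) * 7 // (93 - 10) = 4, and the window then narrows to the right.
if __name__ == "__main__":
    import random

    data = sorted(random.sample(range(1000), 50))
    for item in data:
        found = interpolation_search(data, item)
        assert found is not None and data[found] == item
    assert interpolation_search(data, -1) is None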
| 556 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ : str = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""openbmb/cpm-ant-10b""": 10_24,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<unk>" , __lowerCAmelCase=2_0_0 ):
"""simple docstring"""
__magic_name__ :Optional[int] = vocab
__magic_name__ :Optional[Any] = unk_token
__magic_name__ :List[Any] = max_input_chars_per_word
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = list(__lowerCAmelCase )
if len(__lowerCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
__magic_name__ :Optional[Any] = 0
__magic_name__ :str = []
while start < len(__lowerCAmelCase ):
__magic_name__ :List[str] = len(__lowerCAmelCase )
__magic_name__ :List[Any] = None
while start < end:
__magic_name__ :Union[str, Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
__magic_name__ :Optional[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__lowerCAmelCase )
__magic_name__ :int = end
return sub_tokens
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
a__ = False
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<d>" , __lowerCAmelCase="</d>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="</n>" , __lowerCAmelCase="</_>" , __lowerCAmelCase="left" , **__lowerCAmelCase , ):
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=__lowerCAmelCase , eod_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , line_token=__lowerCAmelCase , space_token=__lowerCAmelCase , padding_side=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :Optional[Any] = bod_token
__magic_name__ :int = eod_token
__magic_name__ :List[str] = load_vocab(__lowerCAmelCase )
__magic_name__ :Optional[Any] = self.encoder[space_token]
__magic_name__ :Optional[int] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__magic_name__ :List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
__magic_name__ :Dict = {v: k for k, v in self.encoder.items()}
__magic_name__ :Union[str, Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A ( self ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def A ( self ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def A ( self ):
"""simple docstring"""
return self.encoder["\n"]
@property
def A ( self ):
"""simple docstring"""
return len(self.encoder )
def A ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = []
for x in jieba.cut(__lowerCAmelCase , cut_all=__lowerCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowerCAmelCase ) )
return output_tokens
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = [i for i in token_ids if i >= 0]
__magic_name__ :Tuple = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return token in self.encoder
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return "".join(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if os.path.isdir(__lowerCAmelCase ):
__magic_name__ :Any = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__magic_name__ :List[Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
__magic_name__ :Tuple = 0
if " " in self.encoder:
__magic_name__ :Any = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__magic_name__ :Optional[Any] = self.encoder['''\n''']
del self.encoder["\n"]
__magic_name__ :List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
__magic_name__ :int = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase ))
return [1] + ([0] * len(__lowerCAmelCase ))
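# The WordpieceTokenizer above does greedy longest-prefix matching against the
# vocabulary; a minimal standalone sketch of the same idea with a toy vocab
# (all names here are illustrative):
def greedy_longest_match(text: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:  # no prefix matched the vocabulary
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens


assert greedy_longest_match("abc", {"ab", "a", "c"}) == ["ab", "c"]  # longest match wins
assert greedy_longest_match("xz", {"x"}) == ["x", "<unk>"]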
| 705 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple, value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")

        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")

        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
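# A numeric cross-check of the rank-1 update that sherman_morrison applies,
#   (A + u v^T)^-1 == A^-1 - (A^-1 u)(v^T A^-1) / (1 + v^T A^-1 u),
# using numpy (illustrative; assumes the denominator is nonzero, which the
# method itself checks):
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(3, 3)) + 3 * np.eye(3)  # shifted to avoid a singular draw
a_inv = np.linalg.inv(a)
u = rng.normal(size=(3, 1))
v = rng.normal(size=(3, 1))

factor = 1.0 + (v.T @ a_inv @ u).item()
updated = a_inv - (a_inv @ u) @ (v.T @ a_inv) / factor
assert np.allclose(updated, np.linalg.inv(a + u @ v.T))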
| 180 | 0 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
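# Closed-form alternative (illustrative): by Binet's approximation
# F_k ≈ φ**k / √5, the first index with n digits satisfies
# k * log10(φ) - log10(√5) >= n - 1.
def solution_closed_form(n: int = 1000) -> int:
    from math import ceil, log10, sqrt

    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))


if __name__ == "__main__":
    assert solution_closed_form(3) == 12  # F_12 = 144 is the first 3-digit term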
| 103 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 367 | 0 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A ( __snake_case , __snake_case , __snake_case ):
__magic_name__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50257 , SCREAMING_SNAKE_CASE = 1024 , SCREAMING_SNAKE_CASE = 768 , SCREAMING_SNAKE_CASE = 12 , SCREAMING_SNAKE_CASE = 12 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "gelu_new" , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 1e-5 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , ) -> List[Any]:
"""simple docstring"""
super().__init__()
A : str = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
F' `n_embd`: {n_embd} are not equal.' )
A : List[Any] = prefix_inner_dim
A : Optional[int] = prefix_hidden_dim
A : int = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A : Optional[int] = (
nn.Linear(self.prefix_hidden_dim , UpperCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A : Optional[int] = GPTaConfig(
vocab_size=UpperCamelCase_ , n_positions=UpperCamelCase_ , n_embd=UpperCamelCase_ , n_layer=UpperCamelCase_ , n_head=UpperCamelCase_ , n_inner=UpperCamelCase_ , activation_function=UpperCamelCase_ , resid_pdrop=UpperCamelCase_ , embd_pdrop=UpperCamelCase_ , attn_pdrop=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , initializer_range=UpperCamelCase_ , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , scale_attn_by_inverse_layer_idx=UpperCamelCase_ , reorder_and_upcast_attn=UpperCamelCase_ , )
A : Union[str, Any] = GPTaLMHeadModel(UpperCamelCase_ )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = self.transformer.transformer.wte(UpperCamelCase_ )
A : Any = self.encode_prefix(UpperCamelCase_ )
A : Union[str, Any] = self.decode_prefix(UpperCamelCase_ )
A : Dict = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A : List[str] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A : int = torch.cat((dummy_token, input_ids) , dim=1 )
A : Dict = self.transformer(inputs_embeds=UpperCamelCase_ , labels=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> torch.Tensor:
"""simple docstring"""
return torch.zeros(UpperCamelCase_ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase_ )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.encode_prefix(UpperCamelCase_ )
@torch.no_grad()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : List[str] = torch.split(UpperCamelCase_ , 1 , dim=0 )
A : List[Any] = []
A : Tuple = []
for feature in features:
A : Dict = self.decode_prefix(feature.to(UpperCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
A : Any = self.generate_beam(
input_embeds=UpperCamelCase_ , device=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A : Union[str, Any] = torch.stack(UpperCamelCase_ )
A : List[Any] = torch.stack(UpperCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE = 5 , SCREAMING_SNAKE_CASE = 67 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = None , ) -> str:
"""simple docstring"""
A : Dict = eos_token_id
A : Tuple = None
A : Optional[int] = None
A : List[Any] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.int )
A : Optional[int] = torch.zeros(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
A : Dict = input_embeds
else:
A : Optional[int] = self.transformer.transformer.wte(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
A : List[Any] = self.transformer(inputs_embeds=UpperCamelCase_ )
A : List[Any] = outputs.logits
A : Tuple = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A : List[str] = logits.softmax(-1 ).log()
if scores is None:
A : Optional[int] = logits.topk(UpperCamelCase_ , -1 )
A : Optional[Any] = generated.expand(UpperCamelCase_ , *generated.shape[1:] )
A : Optional[int] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A : List[str] = next_tokens
else:
A : Union[str, Any] = tokens.expand(UpperCamelCase_ , *tokens.shape[1:] )
A : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
A : Optional[Any] = -float(np.inf )
A : Any = 0
A : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A : List[Any] = scores_sum / seq_lengths[:, None]
A : Optional[Any] = scores_sum_average.view(-1 ).topk(UpperCamelCase_ , -1 )
A : str = next_tokens // scores_sum.shape[1]
A : int = seq_lengths[next_tokens_source]
A : List[str] = next_tokens % scores_sum.shape[1]
A : Optional[int] = next_tokens.unsqueeze(1 )
A : Optional[Any] = tokens[next_tokens_source]
A : Optional[int] = torch.cat((tokens, next_tokens) , dim=1 )
A : Optional[int] = generated[next_tokens_source]
A : List[Any] = scores_sum_average * seq_lengths
A : Dict = is_stopped[next_tokens_source]
A : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A : Dict = torch.cat((generated, next_token_embed) , dim=1 )
A : List[Any] = is_stopped + next_tokens.eq(UpperCamelCase_ ).squeeze()
if is_stopped.all():
break
A : int = scores / seq_lengths
A : Optional[int] = scores.argsort(descending=UpperCamelCase_ )
# tokens tensors are already padded to max_seq_length
A : Any = [tokens[i] for i in order]
A : Tuple = torch.stack(UpperCamelCase_ , dim=0 )
A : Optional[int] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
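# The prefix-conditioning pattern the class above implements, in miniature:
# map an external feature into the LM's embedding space and prepend it to the
# token embeddings before the forward pass (all sizes below are illustrative):
import torch
from torch import nn

feat_dim, n_embd, prefix_len = 512, 768, 77
encode = nn.Linear(feat_dim, n_embd)  # stands in for encode_prefix/decode_prefix
wte = nn.Embedding(50257, n_embd)  # stands in for the GPT-2 token embedding

features = torch.randn(2, prefix_len, feat_dim)
input_ids = torch.randint(0, 50257, (2, 10))
hidden = torch.cat([encode(features), wte(input_ids)], dim=1)
assert hidden.shape == (2, prefix_len + 10, n_embd)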
| 704 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase : str = logging.get_logger(__name__)
# TODO: upload to AWS
lowercase : Optional[Any] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''retribert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : List[Any] = vocab_size
A : Dict = hidden_size
A : Any = num_hidden_layers
A : Any = num_attention_heads
A : List[Any] = hidden_act
A : Any = intermediate_size
A : str = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : List[Any] = max_position_embeddings
A : Tuple = type_vocab_size
A : Optional[Any] = initializer_range
A : Union[str, Any] = layer_norm_eps
A : Dict = share_encoders
A : Dict = projection_dim
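# The PretrainedConfig pattern in miniature: constructor kwargs become instance
# attributes and round-trip through a plain dict. TinyConfig is a hypothetical
# stand-in for illustration, not the transformers class.
class TinyConfig:
    def __init__(self, hidden_size: int = 768, projection_dim: int = 128):
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim

    def to_dict(self) -> dict:
        return dict(self.__dict__)

    @classmethod
    def from_dict(cls, d: dict) -> "TinyConfig":
        return cls(**d)


assert TinyConfig.from_dict(TinyConfig(projection_dim=64).to_dict()).projection_dim == 64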
| 343 | 0 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
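# A quick sanity check (illustrative): a valid vertex cover must touch every
# edge, i.e. every edge has at least one endpoint in the chosen set.
def is_vertex_cover(graph: dict, vertices: set) -> bool:
    return all(u in vertices or v in vertices for u in graph for v in graph[u])


if __name__ == "__main__":
    demo = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    assert is_vertex_cover(demo, greedy_min_vertex_cover(demo))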
| 273 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
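# In standard notation the function above computes
#   H(z) = H0 * sqrt(Om_r*(1+z)**4 + Om_m*(1+z)**3 + Om_k*(1+z)**2 + Om_L)
# with curvature density Om_k = 1 - Om_r - Om_m - Om_L. At z = 0 the bracket
# sums to exactly 1, so H(0) must equal H0 (quick illustrative check):
if __name__ == "__main__":
    assert abs(hubble_parameter(68.3, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0) - 68.3) < 1e-9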
| 273 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 291 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Checks whether ``n`` can be placed at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Finds the first empty cell, scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solves the grid in place by backtracking; returns it, or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Prints the grid one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 291 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =None
UpperCamelCase =None
UpperCamelCase =None
UpperCamelCase =None
class UpperCAmelCase_ ( snake_case ):
def __init__( self , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=5_12 , UpperCamelCase_="cls" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ) -> List[Any]:
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : Optional[Any] = project_dim
__lowercase : Union[str, Any] = pooler_fn
__lowercase : List[Any] = learn_encoder
__lowercase : Union[str, Any] = use_attention_mask
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =[r"pooler", r"logit_scale"]
UpperCamelCase =[r"position_ids", r"predictions.decoder.bias"]
UpperCamelCase ="roberta"
UpperCamelCase =RobertaSeriesConfig
def __init__( self , UpperCamelCase_ ) -> Dict:
super().__init__(UpperCamelCase_ )
__lowercase : Optional[int] = XLMRobertaModel(UpperCamelCase_ )
__lowercase : Union[str, Any] = nn.Linear(config.hidden_size , config.project_dim )
__lowercase : str = getattr(UpperCamelCase_ , '''has_pre_transformation''' , UpperCamelCase_ )
if self.has_pre_transformation:
__lowercase : int = nn.Linear(config.hidden_size , config.project_dim )
__lowercase : Optional[Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowerCamelCase ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Tuple:
__lowercase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase : int = self.base_model(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_attentions=UpperCamelCase_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCamelCase_ , )
if self.has_pre_transformation:
__lowercase : Any = outputs['''hidden_states'''][-2]
__lowercase : Dict = self.pre_LN(UpperCamelCase_ )
__lowercase : Union[str, Any] = self.transformation_pre(UpperCamelCase_ )
return TransformationModelOutput(
projection_state=UpperCamelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__lowercase : List[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCamelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
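# The projection-head pattern above in miniature: layer-norm a hidden state and
# map it to project_dim, as the `has_pre_transformation` branch does
# (sizes here are illustrative):
import torch
from torch import nn

hidden_size, project_dim = 768, 512
pre_ln = nn.LayerNorm(hidden_size)
proj = nn.Linear(hidden_size, project_dim)

last_hidden = torch.randn(2, 7, hidden_size)
projection_state = proj(pre_ln(last_hidden))
assert projection_state.shape == (2, 7, project_dim)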
| 76 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
a_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( snake_case ):
def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Tuple:
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : Union[str, Any] = eval_examples
__lowercase : Union[str, Any] = post_process_function
__lowercase : Any = quant_trainer_args
__lowercase : Optional[Any] = 1_28 # default number of calibration samples
def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__lowercase : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset
__lowercase : str = self._remove_unused_columns(UpperCamelCase_ , description='''Calibration''' )
return DataLoader(
UpperCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase_ , )
def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any:
__lowercase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
__lowercase : List[Any] = self.get_calib_dataloader(UpperCamelCase_ )
__lowercase : Dict = self.model
quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args , calib=UpperCamelCase_ )
model.eval()
quant_trainer.enable_calibration(UpperCamelCase_ )
logger.info('''***** Running calibration *****''' )
logger.info(F""" Num examples = {self.calib_num}""" )
logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(UpperCamelCase_ ):
# Prediction step
__lowercase ,__lowercase ,__lowercase : Optional[Any] = self.prediction_step(UpperCamelCase_ , UpperCamelCase_ , prediction_loss_only=UpperCamelCase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCamelCase_ , self.quant_trainer_args )
__lowercase : Tuple = model
def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = "eval" ) -> str:
__lowercase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase : Union[str, Any] = self.get_eval_dataloader(UpperCamelCase_ )
__lowercase : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase : Optional[int] = self.compute_metrics
__lowercase : Dict = None
__lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase : Tuple = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , )
finally:
__lowercase : List[str] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__lowercase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions )
__lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__lowercase : List[str] = metrics.pop(UpperCamelCase_ )
self.log(UpperCamelCase_ )
else:
__lowercase : Dict = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowercase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" ) -> List[Any]:
__lowercase : Optional[int] = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase : str = self.compute_metrics
__lowercase : Dict = None
__lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase : Union[str, Any] = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , )
finally:
__lowercase : Any = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions , '''predict''' )
__lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__lowercase : List[str] = metrics.pop(UpperCamelCase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_="./" ) -> int:
__lowercase : Optional[int] = self.eval_dataset
__lowercase : Optional[int] = self.get_eval_dataloader(UpperCamelCase_ )
__lowercase : Any = next(iter(UpperCamelCase_ ) )
# saving device - to make it consistent
__lowercase : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__lowercase : Tuple = tuple(v.to(UpperCamelCase_ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__lowercase : List[Any] = True
__lowercase : int = self.model.to(UpperCamelCase_ )
model.eval()
model.float()
__lowercase : Optional[int] = model.module if hasattr(UpperCamelCase_ , '''module''' ) else model
quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args )
__lowercase : Tuple = os.path.join(UpperCamelCase_ , '''model.onnx''' )
logger.info(F"""exporting model to {output_model_file}""" )
__lowercase : Tuple = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , export_params=UpperCamelCase_ , opset_version=13 , do_constant_folding=UpperCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=UpperCamelCase_ , )
logger.info('''onnx export finished''' )
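# The dynamic_axes idea used in the export above, in miniature: leave batch and
# sequence dimensions symbolic so one ONNX file serves any input shape (assumes
# torch's ONNX exporter and its `onnx` dependency are installed; the module and
# file name are illustrative):
import torch


class Tiny(torch.nn.Module):
    def forward(self, x):
        return x * 2


axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
    Tiny(),
    torch.zeros(1, 4),
    "tiny.onnx",
    input_names=["x"],
    output_names=["y"],
    dynamic_axes={"x": axes, "y": axes},
)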
| 76 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=19 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ) -> str:
SCREAMING_SNAKE_CASE__: str= parent
SCREAMING_SNAKE_CASE__: Tuple= batch_size
SCREAMING_SNAKE_CASE__: Optional[int]= seq_length
SCREAMING_SNAKE_CASE__: Any= is_training
SCREAMING_SNAKE_CASE__: Any= use_input_mask
SCREAMING_SNAKE_CASE__: Any= use_token_type_ids
SCREAMING_SNAKE_CASE__: Dict= use_labels
SCREAMING_SNAKE_CASE__: List[Any]= vocab_size
SCREAMING_SNAKE_CASE__: int= hidden_size
SCREAMING_SNAKE_CASE__: int= num_hidden_layers
SCREAMING_SNAKE_CASE__: Optional[int]= num_attention_heads
SCREAMING_SNAKE_CASE__: str= intermediate_size
SCREAMING_SNAKE_CASE__: int= hidden_act
SCREAMING_SNAKE_CASE__: Tuple= hidden_dropout_prob
SCREAMING_SNAKE_CASE__: List[str]= attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__: Optional[int]= max_position_embeddings
SCREAMING_SNAKE_CASE__: Tuple= type_vocab_size
SCREAMING_SNAKE_CASE__: Optional[Any]= type_sequence_label_size
SCREAMING_SNAKE_CASE__: str= initializer_range
SCREAMING_SNAKE_CASE__: Tuple= num_labels
SCREAMING_SNAKE_CASE__: Tuple= num_choices
SCREAMING_SNAKE_CASE__: List[Any]= scope
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Tuple= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__: List[Any]= None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__: str= random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: str= None
if self.use_labels:
SCREAMING_SNAKE_CASE__: Any= ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__: Dict= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__: Any= ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__: int= self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Optional[Any]= EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCAmelCase , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: List[str]= EsmForProteinFolding(config=lowerCAmelCase ).float()
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= model(lowerCAmelCase )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
), (
SCREAMING_SNAKE_CASE__
),
): Any= config_and_inputs
SCREAMING_SNAKE_CASE__: int= {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = False
__a = (EsmForProteinFolding,) if is_torch_available() else ()
__a = ()
__a = {} if is_torch_available() else {}
__a = False
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= EsmFoldModelTester(self )
SCREAMING_SNAKE_CASE__: Any= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: int= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@unittest.skip('''Does not support attention outputs''' )
def UpperCamelCase_ ( self ) -> List[Any]:
pass
@unittest.skip
def UpperCamelCase_ ( self ) -> Dict:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCamelCase_ ( self ) -> str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def UpperCamelCase_ ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCamelCase_ ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCamelCase_ ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def UpperCamelCase_ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def UpperCamelCase_ ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def UpperCamelCase_ ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def UpperCamelCase_ ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def UpperCamelCase_ ( self ) -> Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCamelCase_ ( self ) -> Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCamelCase_ ( self ) -> int:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCamelCase_ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@require_torch
class _lowerCamelCase ( UpperCamelCase_ ):
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Optional[Any]= EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
SCREAMING_SNAKE_CASE__: Dict= torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE__: Any= model(lowerCAmelCase )['''positions''']
SCREAMING_SNAKE_CASE__: Optional[Any]= torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCAmelCase , atol=1e-4 ) )
| 107 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = AltDiffusionPipeline
__a = TEXT_TO_IMAGE_PARAMS
__a = TEXT_TO_IMAGE_BATCH_PARAMS
__a = TEXT_TO_IMAGE_IMAGE_PARAMS
__a = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Optional[Any]= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE__: List[Any]= 77
SCREAMING_SNAKE_CASE__: Union[str, Any]= {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: Any= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
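        # MPS does not support device-bound generators here, so seeding falls back
        # to the global (CPU) generator on that backend.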
SCREAMING_SNAKE_CASE__: Any= {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self ) -> List[str]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase_ ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__: List[str]= RobertaSeriesModelWithTransformation(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= text_encoder
SCREAMING_SNAKE_CASE__: List[Any]= AltDiffusionPipeline(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= '''A photo of an astronaut'''
SCREAMING_SNAKE_CASE__: Any= alt_pipe(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= output.images
SCREAMING_SNAKE_CASE__: Tuple= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__: Any= np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: List[str]= '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: List[str]= PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__: str= RobertaSeriesModelWithTransformation(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= text_encoder
SCREAMING_SNAKE_CASE__: Any= AltDiffusionPipeline(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= alt_pipe(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= output.images
SCREAMING_SNAKE_CASE__: Tuple= image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__: List[Any]= np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Optional[int]:
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__: Any= AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= alt_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
SCREAMING_SNAKE_CASE__: List[Any]= output.images
SCREAMING_SNAKE_CASE__: Dict= image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__: Optional[int]= np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: List[Any]= DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__: List[str]= AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= alt_pipe.to(lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__: Any= torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: str= alt_pipe([prompt] , generator=lowerCAmelCase , num_inference_steps=2 , output_type='''numpy''' )
SCREAMING_SNAKE_CASE__: List[str]= output.images
SCREAMING_SNAKE_CASE__: Dict= image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__: Optional[Any]= np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 107 | 1 |
def knapsack( weights ,values ,number_of_items ,max_weight ,index ):
    """
    Recursive 0/1 knapsack: return the maximum total value achievable using
    items from position ``index`` onwards within the remaining ``max_weight``
    capacity. Each item is either skipped or taken exactly once.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item and solve for the remaining ones.
    ans_without_item = knapsack(weights ,values ,number_of_items ,max_weight ,index + 1 )
    # Option 2: take the current item (only if it still fits) and shrink the capacity.
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + knapsack(
            weights ,values ,number_of_items ,max_weight - weights[index] ,index + 1 )
    return max(ans_without_item ,ans_with_item )
if __name__ == "__main__":
import doctest
doctest.testmod()
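    # Illustrative extra check (not from the original file): with weights
    # [1, 2, 4, 5], values [5, 4, 8, 6] and capacity 5, the best choice is
    # items 0 and 2, for a total value of 13.
    print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))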
| 605 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING
def __magic_name__ ( self )-> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __magic_name__ ( self )-> List[str]:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-0_5, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-0_5, 'token': 25506, 'token_str': ' accuser'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-0_5,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-0_5,
'token': 25506,
'token_str': ' accuser',
},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-0_5, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def __magic_name__ ( self )-> Tuple:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-0_5, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-0_5, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 13606, 'token_str': ' Clara'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
[
{
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def __magic_name__ ( self )-> int:
_SCREAMING_SNAKE_CASE = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
_SCREAMING_SNAKE_CASE = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(A_ , A_ )
@slow
@require_torch
def __magic_name__ ( self )-> Optional[int]:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(A_ )
@slow
@require_tf
def __magic_name__ ( self )-> str:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(A_ )
def __magic_name__ ( self , A_ )-> List[str]:
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(A_ ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(A_ ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 12790,
'token_str': ' Lyon',
},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(A_ ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def __magic_name__ ( self )-> Dict:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
self.run_pipeline_test(A_ , [] )
@require_tf
def __magic_name__ ( self )-> Any:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
self.run_pipeline_test(A_ , [] )
def __magic_name__ ( self , A_ , A_ , A_ )-> Optional[int]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def __magic_name__ ( self , A_ , A_ )-> Optional[int]:
_SCREAMING_SNAKE_CASE = fill_masker.tokenizer
_SCREAMING_SNAKE_CASE = fill_masker.model
_SCREAMING_SNAKE_CASE = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
A_ , [
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
] , )
with self.assertRaises(A_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(A_ ):
fill_masker('This is' )
self.run_test_top_k(A_ , A_ )
self.run_test_targets(A_ , A_ )
self.run_test_top_k_targets(A_ , A_ )
self.fill_mask_with_duplicate_targets_and_top_k(A_ , A_ )
self.fill_mask_with_multiple_masks(A_ , A_ )
def __magic_name__ ( self , A_ , A_ )-> List[Any]:
_SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
_SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:2]
# Pipeline argument
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ , targets=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , A_ )
_SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(A_ ) )
# Call argument
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=A_ )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , A_ )
_SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(A_ ) )
# Score equivalence
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=A_ )
_SCREAMING_SNAKE_CASE = [top_mask['token_str'] for top_mask in outputs]
_SCREAMING_SNAKE_CASE = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ) == set(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=A_ )
_SCREAMING_SNAKE_CASE = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
# Raises with invalid
with self.assertRaises(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''] )
with self.assertRaises(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='' )
def __magic_name__ ( self , A_ , A_ )-> List[str]:
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ , top_k=2 )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def __magic_name__ ( self , A_ , A_ )-> Dict:
_SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
# top_k=2, ntargets=3
_SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3]
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=A_ )
        # If we use the most probable targets, and filter differently, we should still
# have the same results
        _SCREAMING_SNAKE_CASE = [el['token_str'] for el in sorted(A_ , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ).issubset(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=A_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def __magic_name__ ( self , A_ , A_ )-> Dict:
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
# String duplicates + id duplicates
_SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3]
_SCREAMING_SNAKE_CASE = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_SCREAMING_SNAKE_CASE = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=A_ , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return more
        # unique predictions than there are distinct targets.
self.assertEqual(len(A_ ) , 3 )
def __magic_name__ ( self , A_ , A_ )-> int:
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
A_ , [
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
] , )
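# Minimal usage sketch (illustrative, not part of the test suite; the model name
# is the same small checkpoint exercised above):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)
# Each prediction is a dict with "sequence", "score", "token" and "token_str"
# keys, which is exactly the schema the assertions above check for.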
| 605 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class a ( __UpperCAmelCase ):
def __init__( self : List[Any] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 376 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase_ = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def _UpperCAmelCase ( UpperCamelCase: Optional[int] , UpperCamelCase: Tuple ):
"""simple docstring"""
inspect_dataset(UpperCamelCase , UpperCamelCase )
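    # inspect_dataset downloads the dataset's loading script into the target
    # directory so it can be inspected locally; the assertions below check that
    # the script landed there without cache debris.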
__lowerCAmelCase = path + ".py"
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def _UpperCAmelCase ( UpperCamelCase: List[Any] , UpperCamelCase: Any ):
"""simple docstring"""
inspect_metric(UpperCamelCase , UpperCamelCase )
__lowerCAmelCase = path + ".py"
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def _UpperCAmelCase ( UpperCamelCase: Optional[Any] , UpperCamelCase: int , UpperCamelCase: Tuple ):
"""simple docstring"""
__lowerCAmelCase = get_dataset_config_info(UpperCamelCase , config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def _UpperCAmelCase ( UpperCamelCase: Dict , UpperCamelCase: Any , UpperCamelCase: List[str] ):
"""simple docstring"""
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase , config_name=UpperCamelCase )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def _UpperCAmelCase ( UpperCamelCase: List[str] , UpperCamelCase: Optional[int] ):
"""simple docstring"""
__lowerCAmelCase = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: Tuple , UpperCamelCase: List[Any] ):
"""simple docstring"""
__lowerCAmelCase = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def _UpperCAmelCase ( UpperCamelCase: Optional[int] , UpperCamelCase: int , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
__lowerCAmelCase = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def _UpperCAmelCase ( UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: List[Any] ):
"""simple docstring"""
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase , config_name=UpperCamelCase )
| 376 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_snake_case : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Dict = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _A ( __snake_case :Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoMAEConfig()
set_architecture_configs(__snake_case , __snake_case )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = False
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = "huggingface/label-files"
if "kinetics" in model_name:
__SCREAMING_SNAKE_CASE = 400
__SCREAMING_SNAKE_CASE = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__SCREAMING_SNAKE_CASE = 174
__SCREAMING_SNAKE_CASE = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__SCREAMING_SNAKE_CASE = {int(__snake_case ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def _A ( __snake_case :Dict , __snake_case :Optional[Any] ) -> List[Any]:
"""simple docstring"""
if "small" in model_name:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 192
__SCREAMING_SNAKE_CASE = 768
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 2048
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 1280
__SCREAMING_SNAKE_CASE = 5120
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 640
__SCREAMING_SNAKE_CASE = 2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _A ( __snake_case :List[Any] ) -> Optional[int]:
"""simple docstring"""
if "encoder." in name:
__SCREAMING_SNAKE_CASE = name.replace("encoder." , "" )
if "cls_token" in name:
__SCREAMING_SNAKE_CASE = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.self" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__SCREAMING_SNAKE_CASE = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__SCREAMING_SNAKE_CASE = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__SCREAMING_SNAKE_CASE = name.replace("head" , "classifier" )
return name
def _A ( __snake_case :Union[str, Any] , __snake_case :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(__snake_case )
if key.startswith("encoder." ):
__SCREAMING_SNAKE_CASE = key.replace("encoder." , "" )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split("." )
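            # The original checkpoint fuses query/key/value into a single `qkv`
            # projection; slice it into the separate matrices the HF model expects.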
if key.startswith("decoder.blocks" ):
__SCREAMING_SNAKE_CASE = config.decoder_hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[2] )
__SCREAMING_SNAKE_CASE = "decoder.decoder_layers."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = config.hidden_size
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = "videomae.encoder.layer."
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def _A ( ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = np.load(__snake_case )
return list(__snake_case )
def _A ( __snake_case :Optional[int] , __snake_case :List[str] , __snake_case :Union[str, Any] , __snake_case :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_videomae_config(__snake_case )
if "finetuned" in model_name:
__SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification(__snake_case )
else:
__SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(__snake_case )
# download original checkpoint, hosted on Google Drive
__SCREAMING_SNAKE_CASE = "pytorch_model.bin"
gdown.cached_download(__snake_case , __snake_case , quiet=__snake_case )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case , map_location="cpu" )
if "model" in files:
__SCREAMING_SNAKE_CASE = files["model"]
else:
__SCREAMING_SNAKE_CASE = files["module"]
__SCREAMING_SNAKE_CASE = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
model.eval()
# verify model on basic input
__SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__SCREAMING_SNAKE_CASE = prepare_video()
__SCREAMING_SNAKE_CASE = image_processor(__snake_case , return_tensors="pt" )
if "finetuned" not in model_name:
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__SCREAMING_SNAKE_CASE = torch.load(__snake_case )
__SCREAMING_SNAKE_CASE = model(**__snake_case )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
__SCREAMING_SNAKE_CASE = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
__SCREAMING_SNAKE_CASE = torch.Size([1, 400] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 1408, 1536] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
__SCREAMING_SNAKE_CASE = torch.Size([1, 174] )
__SCREAMING_SNAKE_CASE = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1e-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__SCREAMING_SNAKE_CASE = outputs.loss
assert torch.allclose(__snake_case , __snake_case , atol=1e-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__snake_case , organization="nielsr" )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 693 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ):
"""simple docstring"""
A_ : Tuple = 'backbone.' if is_semantic else ''
A_ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", 'beit.embeddings.cls_token'),
(f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A_ : Any = 'backbone.' if is_semantic else ''
# queries, keys and values
A_ : List[str] = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
A_ : Any = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
A_ : int = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
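        # BEiT's key projection is bias-free, so the checkpoint only stores q_bias and v_bias.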
A_ : int = in_proj_weight[
: config.hidden_size, :
]
A_ : Union[str, Any] = q_bias
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Dict = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A_ : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
A_ : Dict = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
A_ : Any = gamma_a
A_ : Any = gamma_a
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = dct.pop(_UpperCAmelCase )
A_ : Tuple = val
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A_ : Union[str, Any] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
A_ : List[str] = False if 'rvlcdip' in checkpoint_url else True
A_ : Dict = BeitConfig(use_absolute_position_embeddings=_UpperCAmelCase , use_mask_token=_UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A_ : List[str] = 1024
A_ : Optional[Any] = 4096
A_ : List[str] = 24
A_ : List[str] = 16
# labels
if "rvlcdip" in checkpoint_url:
A_ : Optional[Any] = 16
A_ : Optional[Any] = 'huggingface/label-files'
A_ : Any = 'rvlcdip-id2label.json'
A_ : List[str] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
A_ : List[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A_ : Dict = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' )['model']
A_ : List[Any] = create_rename_keys(_UpperCAmelCase , has_lm_head=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , has_lm_head=_UpperCAmelCase )
# load HuggingFace model
A_ : Optional[int] = BeitForMaskedImageModeling(_UpperCAmelCase ) if has_lm_head else BeitForImageClassification(_UpperCAmelCase )
model.eval()
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image
A_ : Optional[int] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCAmelCase )
A_ : Union[str, Any] = prepare_img()
A_ : List[Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
A_ : Any = encoding['pixel_values']
A_ : Optional[int] = model(_UpperCAmelCase )
A_ : Tuple = outputs.logits
# verify logits
A_ : Dict = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(_UpperCAmelCase ), "Shape of logits not as expected"
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
if has_lm_head:
A_ : str = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_UpperCAmelCase , _UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
lowerCamelCase_ : Any = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
lowerCamelCase_ : List[str] = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 302 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCamelCase_ : Union[str, Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCamelCase_ : Optional[int] = [0, 25, 50]
lowerCamelCase_ : Union[str, Any] = [25, 50, 75]
lowerCamelCase_ : List[Any] = fuzz.membership.trimf(X, abca)
lowerCamelCase_ : Optional[Any] = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCamelCase_ : Optional[int] = np.ones(75)
lowerCamelCase_ : Optional[int] = np.zeros((75,))
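    # `one` and `zero` act as the clamping bounds used by the bounded sum and
    # bounded difference operations further below.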
# 1. Union = max(µA(x), µB(x))
lowerCamelCase_ : Dict = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCamelCase_ : Union[str, Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
lowerCamelCase_ : List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCamelCase_ : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
lowerCamelCase_ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCamelCase_ : Tuple = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
lowerCamelCase_ : List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
lowerCamelCase_ : List[Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
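    # Illustrative sketch of the two compositions named above, on assumed toy
    # fuzzy relation matrices (not part of the original script):
    # (R1 o R2)[i, j] = max_k min(R1[i, k], R2[k, j])
    R1 = np.array([[0.2, 0.8], [0.6, 0.4]])
    R2 = np.array([[0.5, 0.9], [0.3, 0.7]])
    max_min_composition = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
    # max-product composition replaces min with the algebraic product
    max_product_composition = np.max(R1[:, :, None] * R2[None, :, :], axis=1)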
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
    plt.show()
| 302 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        # Build a tiny DiT transformer, VAE and scheduler so the fast test
        # runs on CPU in seconds.
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        return {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
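# The integration tests above are gated by the @slow decorator; diffusers
# skips them unless the RUN_SLOW environment variable is set, e.g. (test path
# assumed from the usual diffusers repository layout):
#
#   RUN_SLOW=1 python -m pytest tests/pipelines/dit/test_dit.py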
| 421 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c used
# by the Casimir formula below; pi is imported from math above.
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ: J * s
SPEED_OF_LIGHT = 3e8  # unit of c: m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the Casimir equation F = (ℏ * c * π² * A) / (240 * d⁴) for whichever
    of force, area or distance is passed as 0, and return it in a dict.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
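    # Example: with force set to 0, solve for the attractive Casimir force
    # between plates of area 4 cm^2 (4e-4 m^2) held 1 µm apart; the numbers
    # here are illustrative inputs, not part of the original file.
    print(casimir_force(force=0, area=4e-4, distance=1e-6))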
| 250 | 0 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a Spark DataFrame into a `datasets.Dataset`."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        # Streaming mode avoids materializing the whole DataFrame up front.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
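# Minimal usage sketch (assumes an active SparkSession bound to `spark`; the
# DataFrame contents and cache directory below are illustrative):
#
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()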
| 711 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket')
@patch('builtins.open')
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
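# `send_file` itself is imported from file_transfer/send_file.py and is not
# shown in this file; a minimal sketch consistent with the assertions above
# (the port number and loop structure are illustrative assumptions):
#
#   import socket
#
#   def send_file(filename: str = 'mytext.txt', testing: bool = False) -> None:
#       sock = socket.socket()
#       sock.bind((socket.gethostname(), 12312))  # 12312: arbitrary port
#       sock.listen(5)
#       conn, addr = sock.accept()
#       conn.recv(1024)  # wait for the client's request
#       with open(filename, 'rb') as in_file:
#           data = in_file.read(1024)
#           while data:  # stream the file in 1 KiB chunks
#               conn.send(data)
#               data = in_file.read(1024)
#       conn.close()
#       sock.shutdown(1)
#       sock.close()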
| 133 | 0 |