import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Build the list of all feature extractors
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class from its (string) class name."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor configuration (as a dict) from a pretrained model name or local directory."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    Generic feature extractor class that is instantiated as one of the feature extractor classes of the library when
    created with the [`AutoFeatureExtractor.from_pretrained`] class method. This class cannot be instantiated
    directly using `__init__()` (it raises an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate one of the feature extractor classes of the library from a pretrained checkpoint."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)

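# A minimal usage sketch, not part of the original module. Any checkpoint whose model
# type appears in the mapping above works; "facebook/wav2vec2-base-960h" is one
# well-known example, and `raw_audio` is a placeholder for a 1-D float array of samples.
#
#     from transformers import AutoFeatureExtractor
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = feature_extractor(raw_audio, sampling_rate=16_000, return_tensors="np")
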
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline

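# A minimal usage sketch, not part of the original module. "shi-labs/versatile-diffusion"
# is the Hub checkpoint these pipelines were published with; the exact convenience method
# names can vary across diffusers versions, so treat this call as an assumption.
#
#     from diffusers import VersatileDiffusionPipeline
#
#     pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#     image = pipe.text_to_image("a red panda reading a book").images[0]
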
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
lowerCAmelCase : str = TOKENIZER_CLASSES
else:
lowerCAmelCase : str = {tokenizer_name: getattr(_A , tokenizer_name + "Fast" )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
lowerCAmelCase : List[Any] = TOKENIZER_CLASSES[tokenizer_name]
lowerCAmelCase : Optional[int] = True
if checkpoint_name is None:
lowerCAmelCase : Any = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCAmelCase : str = [checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
lowerCAmelCase : List[Any] = tokenizer_class.from_pretrained(_A , force_download=_A )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase , lowerCAmelCase : List[str] = checkpoint.split("/" )
lowerCAmelCase : Dict = os.path.join(_A , _A )
elif add_prefix:
lowerCAmelCase : List[str] = checkpoint
lowerCAmelCase : Optional[Any] = dump_path
else:
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Union[str, Any] = dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCAmelCase : Dict = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCAmelCase : Any = file_path.split(_A )[-1][0]
if next_char == "/":
lowerCAmelCase : int = os.path.join(_A , _A )
lowerCAmelCase : List[str] = None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
lowerCAmelCase : Optional[int] = tokenizer.save_pretrained(
_A , legacy_format=_A , filename_prefix=_A )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(_A )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
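# Example invocation of the script above (the output path is hypothetical; any slow
# tokenizer class name listed in TOKENIZER_CLASSES works):
#
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer \
#         --checkpoint_name bert-base-uncased \
#         --dump_path ./fast_tokenizers
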
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects that are applied in order to a `scores`
    tensor, each inheriting from the list and adding a specific `__call__` method.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-p filtering, keeping the smallest set of tokens whose cumulative
    probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that restricts sampling to the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0 before `min_length`."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper-specific [`FlaxLogitsProcessor`] that constrains timestamp tokens during generation."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores

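# A minimal sketch of chaining the processors above during sampling, not part of the
# original module. The `input_ids` and `scores` tensors are illustrative; a real Flax
# language model forward pass would produce them.
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])
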
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

from __future__ import annotations


def encode(plain: str) -> list[int]:
    """
    >>> encode("myname")
    [13, 25, 14, 1, 13, 5]
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """
    >>> decode([13, 25, 14, 1, 13, 5])
    'myname'
    """
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()

import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image


def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )

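# Example invocation of the conversion script above (the local checkpoint and config
# paths are hypothetical):
#
#     python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#         --pytorch_model_path ./efficientformer_l1_300d.pth \
#         --config_file ./efficientformer_l1_config.json \
#         --pytorch_dump_path efficientformer-l1-300
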
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to
    instantiate a Conditional DETR model according to the specified arguments, defining the model architecture.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

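# A minimal usage sketch, not part of the original module; the defaults above mirror
# the microsoft/conditional-detr-resnet-50 checkpoint referenced in the archive map.
#
#     from transformers import ConditionalDetrConfig, ConditionalDetrModel
#
#     config = ConditionalDetrConfig()
#     model = ConditionalDetrModel(config)
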
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """
    Power Iteration: find the largest eigenvalue (and corresponding eigenvector)
    of `input_matrix`, starting from the initial guess `vector`.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.

    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer, backed by HuggingFace's tokenizers library (BPE over SentencePiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)

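# A minimal usage sketch, not part of the original module (the checkpoint id comes
# from the pretrained map above):
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Un exemple en français.")["input_ids"]
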
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCamelCase_ ( ):
lowercase : Optional[Any] = input('''Enter message: ''' )
lowercase : Optional[Any] = input('''Enter key [alphanumeric]: ''' )
lowercase : Union[str, Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowercase : str = '''encrypt'''
lowercase : Optional[Any] = encrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
elif mode.lower().startswith('''d''' ):
lowercase : str = '''decrypt'''
lowercase : Optional[int] = decrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
print(f'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , '''encrypt''' )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , '''decrypt''' )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowercase : Optional[Any] = []
lowercase : Tuple = 0
lowercase : str = key.upper()
for symbol in message:
lowercase : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase_ ):
lowercase : List[str] = 0
else:
translated.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
if __name__ == "__main__":
main()
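# Worked example of the Vigenère translation implemented above, inlined so it is
# runnable on its own (the encrypt/decrypt helpers take the key first, then the
# message). With key "LEMON", "ATTACKATDAWN" encrypts to the classic ciphertext
# "LXFOPVEFRNHR": each letter is shifted by the matching key letter modulo 26
# (A+L -> L, T+E -> X, T+M -> F, ...).
DEMO_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def demo_vigenere(key: str, message: str, sign: int) -> str:
    out = []
    for i, symbol in enumerate(message):
        shift = sign * DEMO_LETTERS.index(key[i % len(key)])
        out.append(DEMO_LETTERS[(DEMO_LETTERS.index(symbol) + shift) % 26])
    return "".join(out)

assert demo_vigenere("LEMON", "ATTACKATDAWN", +1) == "LXFOPVEFRNHR"
assert demo_vigenere("LEMON", "LXFOPVEFRNHR", -1) == "ATTACKATDAWN"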
| 583 | 0 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_a : Optional[Any]= "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_a : List[Any]= "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_a : str= "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_a : int= "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_a : Union[str, Any]= "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _lowercase (self : Dict) -> int:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Value('string'),
}) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def _lowercase (self : int , _A : Any , _A : List[Any] , _A : int=[1, 10, 1_00] , _A : Union[str, Any]=4 , _A : List[str]=3.0) -> Union[str, Any]:
if os.getenv('HF_ALLOW_CODE_EVAL' , 0) != "1":
raise ValueError(_WARNING)
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.')
with ThreadPoolExecutor(max_workers=_A) as executor:
__snake_case : int = []
__snake_case : Tuple = Counter()
__snake_case : Any = 0
__snake_case : str = defaultdict(_A)
for task_id, (candidates, test_case) in enumerate(zip(_A , _A)):
for candidate in candidates:
__snake_case : Dict = candidate + '\n' + test_case
__snake_case : Dict = (test_program, timeout, task_id, completion_id[task_id])
__snake_case : Tuple = executor.submit(_A , *_A)
futures.append(_A)
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_A):
__snake_case : Dict = future.result()
results[result["task_id"]].append((result['completion_id'], result))
__snake_case , __snake_case : Optional[int] = [], []
for result in results.values():
result.sort()
__snake_case : str = [r[1]['passed'] for r in result]
total.append(len(_A))
correct.append(sum(_A))
__snake_case : Optional[int] = np.array(_A)
__snake_case : List[str] = np.array(_A)
__snake_case : List[str] = k
__snake_case : str = {f"pass@{k}": estimate_pass_at_k(_A , _A , _A).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
def estimator(UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
__snake_case : Optional[int] = itertools.repeat(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
else:
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
__snake_case : Any = iter(UpperCAmelCase_ )
return np.array([estimator(int(UpperCAmelCase_ ) , int(UpperCAmelCase_ ) , UpperCAmelCase_ ) for n, c in zip(UpperCAmelCase_ , UpperCAmelCase_ )] )
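# A small self-contained check of the pass@k estimator defined above. The
# closed form is pass@k = 1 - C(n - c, k) / C(n, k); the product form
# 1 - prod(1 - k / arange(n - c + 1, n + 1)) computes it without large
# binomials.
import numpy as np
from math import comb

def demo_pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# 10 samples, 3 correct, k=1: pass@1 reduces to c / n = 0.3
assert abs(demo_pass_at_k(10, 3, 1) - 0.3) < 1e-9
# The product form agrees with the closed form for arbitrary n, c, k.
n, c, k = 20, 5, 4
assert abs(demo_pass_at_k(n, c, k) - (1 - comb(n - c, k) / comb(n, k))) < 1e-9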
| 192 | """simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : List[Any]) -> Dict:
__snake_case : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : Union[str, Any] = -1
__snake_case : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : Any = model.generate(_A , max_new_tokens=10 , do_sample=_A)
__snake_case : str = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
__snake_case : List[str] = TextStreamer(_A)
model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : Optional[Any] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowercase (self : int) -> Optional[Any]:
__snake_case : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : Optional[int] = -1
__snake_case : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : int = model.generate(_A , max_new_tokens=10 , do_sample=_A)
__snake_case : int = tokenizer.decode(greedy_ids[0])
__snake_case : int = TextIteratorStreamer(_A)
__snake_case : List[Any] = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__snake_case : Any = Thread(target=model.generate , kwargs=_A)
thread.start()
__snake_case : Dict = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_A , _A)
def _lowercase (self : Any) -> List[str]:
__snake_case : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : Union[str, Any] = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : Optional[int] = model.generate(_A , max_new_tokens=10 , do_sample=_A)
__snake_case : Union[str, Any] = greedy_ids[:, input_ids.shape[1] :]
__snake_case : Optional[Any] = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
__snake_case : str = TextStreamer(_A , skip_prompt=_A)
model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : List[str] = cs.out[:-1]
self.assertEqual(_A , _A)
def _lowercase (self : List[str]) -> Dict:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__snake_case : int = AutoTokenizer.from_pretrained('distilgpt2')
__snake_case : Dict = AutoModelForCausalLM.from_pretrained('distilgpt2').to(_A)
__snake_case : Union[str, Any] = -1
__snake_case : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__snake_case : Tuple = TextStreamer(_A , skip_special_tokens=_A)
model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__snake_case : Tuple = cs.out[:-1] # Remove the final "\n"
__snake_case : int = tokenizer(_A , return_tensors='pt')
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _lowercase (self : Optional[int]) -> List[str]:
__snake_case : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(_A)
__snake_case : int = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A)
__snake_case : List[Any] = TextIteratorStreamer(_A , timeout=0.001)
__snake_case : str = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
__snake_case : str = Thread(target=model.generate , kwargs=_A)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_A):
__snake_case : Any = ''
for new_text in streamer:
streamer_text += new_text
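# A minimal usage sketch of the streaming pattern exercised above (the model
# name and generation settings are illustrative): generation runs in a
# background thread while the main thread consumes decoded text chunks.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

demo_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
demo_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
demo_inputs = demo_tokenizer(["Streaming lets you"], return_tensors="pt")

demo_streamer = TextIteratorStreamer(demo_tokenizer, skip_prompt=True)
demo_thread = Thread(
    target=demo_model.generate,
    kwargs={**demo_inputs, "max_new_tokens": 10, "do_sample": False, "streamer": demo_streamer},
)
demo_thread.start()
streamed = "".join(chunk for chunk in demo_streamer)  # blocks until generation ends
demo_thread.join()
print(streamed)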
| 192 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class A__( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str]=7 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Optional[Any]=4_00 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : str=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=1 / 2_55 , __SCREAMING_SNAKE_CASE : Tuple=True , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_pad
def _a ( self : str ) -> Any:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=False ) -> List[str]:
"""simple docstring"""
if not batched:
__SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
__SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * h / w )
__SCREAMING_SNAKE_CASE = self.size['''shortest_edge''']
elif w > h:
__SCREAMING_SNAKE_CASE = self.size['''shortest_edge''']
__SCREAMING_SNAKE_CASE = int(self.size['''shortest_edge'''] * w / h )
else:
__SCREAMING_SNAKE_CASE = self.size['''shortest_edge''']
__SCREAMING_SNAKE_CASE = self.size['''shortest_edge''']
else:
__SCREAMING_SNAKE_CASE = []
for image in image_inputs:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
__SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__( __magic_name__ , unittest.TestCase ):
lowerCAmelCase = DeformableDetrImageProcessor if is_vision_available() else None
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DeformableDetrImageProcessingTester(self )
@property
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_rescale''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self : int ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__SCREAMING_SNAKE_CASE = json.loads(f.read() )
__SCREAMING_SNAKE_CASE = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
__SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor()
__SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# verify pixel values
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__SCREAMING_SNAKE_CASE = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
__SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
__SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) )
# verify orig_size
__SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) )
# verify size
__SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
@slow
def _a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__SCREAMING_SNAKE_CASE = json.loads(f.read() )
__SCREAMING_SNAKE_CASE = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
__SCREAMING_SNAKE_CASE = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor(format='''coco_panoptic''' )
__SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# verify pixel values
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__SCREAMING_SNAKE_CASE = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE ) )
# verify boxes
__SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__SCREAMING_SNAKE_CASE = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE ) )
# verify class_labels
__SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE ) )
# verify masks
__SCREAMING_SNAKE_CASE = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __SCREAMING_SNAKE_CASE )
# verify orig_size
__SCREAMING_SNAKE_CASE = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE ) )
# verify size
__SCREAMING_SNAKE_CASE = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE ) )
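# A standalone sketch of the aspect-preserving resize rule that
# get_expected_values above encodes: the shorter side is pinned to
# size["shortest_edge"] and the longer side scales by the same ratio. (The real
# image processors additionally cap the longer side at size["longest_edge"];
# that cap is omitted here to mirror the helper exactly.)
def demo_expected_resize(height: int, width: int, shortest_edge: int = 18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(demo_expected_resize(400, 600))  # -> (18, 27): shorter side pinned to 18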
| 482 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__ ={
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class A__( __magic_name__ ):
lowerCAmelCase = '''codegen'''
lowerCAmelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=5_04_00 , __SCREAMING_SNAKE_CASE : Optional[Any]=20_48 , __SCREAMING_SNAKE_CASE : str=20_48 , __SCREAMING_SNAKE_CASE : Optional[int]=40_96 , __SCREAMING_SNAKE_CASE : Dict=28 , __SCREAMING_SNAKE_CASE : List[str]=16 , __SCREAMING_SNAKE_CASE : List[Any]=64 , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Dict="gelu_new" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-5 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Any=5_02_56 , __SCREAMING_SNAKE_CASE : List[Any]=5_02_56 , __SCREAMING_SNAKE_CASE : List[Any]=False , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = n_ctx
__SCREAMING_SNAKE_CASE = n_positions
__SCREAMING_SNAKE_CASE = n_embd
__SCREAMING_SNAKE_CASE = n_layer
__SCREAMING_SNAKE_CASE = n_head
__SCREAMING_SNAKE_CASE = n_inner
__SCREAMING_SNAKE_CASE = rotary_dim
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = resid_pdrop
__SCREAMING_SNAKE_CASE = embd_pdrop
__SCREAMING_SNAKE_CASE = attn_pdrop
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(
bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class A__( __magic_name__ ):
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" , __SCREAMING_SNAKE_CASE : List[PatchingSpec] = None , __SCREAMING_SNAKE_CASE : bool = False , ) -> List[Any]:
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , __SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__SCREAMING_SNAKE_CASE = 0
@property
def _a ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
__SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _a ( self : Any ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
return self._config.n_head
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
__SCREAMING_SNAKE_CASE = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE = seqlen + 2
__SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__SCREAMING_SNAKE_CASE = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE = common_inputs['''attention_mask''']
if self.use_past:
__SCREAMING_SNAKE_CASE = ordered_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def _a ( self : List[Any] ) -> int:
"""simple docstring"""
return 13
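# A minimal sketch of the dummy past_key_values built above: each of the
# n_layer layers gets a (key, value) pair of zeros shaped
# (batch, n_head, past_len, head_dim), with past_len deliberately different
# from seqlen, and the attention mask widened to cover past + new positions.
import torch

batch, seqlen = 2, 5
n_layer, n_head, hidden_size = 4, 8, 64
past_len = seqlen + 2
head_dim = hidden_size // n_head

demo_past_key_values = [
    (torch.zeros(batch, n_head, past_len, head_dim),
     torch.zeros(batch, n_head, past_len, head_dim))
    for _ in range(n_layer)
]
demo_attention_mask = torch.ones(batch, seqlen + past_len, dtype=torch.int64)
print(demo_past_key_values[0][0].shape, demo_attention_mask.shape)
# torch.Size([2, 8, 7, 8]) torch.Size([2, 12])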
| 482 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
Bzip2Extractor,
Extractor,
GzipExtractor,
Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , ):
"""simple docstring"""
__UpperCAmelCase = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bz2_file, Bzip2Extractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lz4_file, Lz4Extractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
__UpperCAmelCase , __UpperCAmelCase = input_paths_and_base_extractors[compression_format]
if input_path is None:
__UpperCAmelCase = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase__ )
assert base_extractor.is_extractable(UpperCamelCase__ )
__UpperCAmelCase = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__UpperCAmelCase = file_path.read_text(encoding='''utf-8''' )
else:
__UpperCAmelCase = output_path.read_text(encoding='''utf-8''' )
__UpperCAmelCase = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , ):
"""simple docstring"""
__UpperCAmelCase = {
'''7z''': seven_zip_file,
'''bz2''': bz2_file,
'''gzip''': gz_file,
'''lz4''': lz4_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
__UpperCAmelCase = input_paths[compression_format]
if input_path is None:
__UpperCAmelCase = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(UpperCamelCase__ )
__UpperCAmelCase = Extractor.infer_extractor_format(UpperCamelCase__ )
assert extractor_format is not None
__UpperCAmelCase = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__UpperCAmelCase = file_path.read_text(encoding='''utf-8''' )
else:
__UpperCAmelCase = output_path.read_text(encoding='''utf-8''' )
__UpperCAmelCase = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
import tarfile
__UpperCAmelCase = tmp_path / '''data_dot_dot'''
directory.mkdir()
__UpperCAmelCase = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(UpperCamelCase__ , '''w''' ) as f:
f.add(UpperCamelCase__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def lowerCAmelCase ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
import tarfile
__UpperCAmelCase = tmp_path / '''data_sym_link'''
directory.mkdir()
__UpperCAmelCase = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=UpperCamelCase__ )
with tarfile.TarFile(UpperCamelCase__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
__UpperCAmelCase = insecure_tar_files[insecure_tar_file]
__UpperCAmelCase = tmp_path / '''extracted'''
TarExtractor.extract(UpperCamelCase__ , UpperCamelCase__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCAmelCase ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__UpperCAmelCase = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
__UpperCAmelCase = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(UpperCamelCase__ )
assert zipfile.is_zipfile(str(UpperCamelCase__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(UpperCamelCase__ ) # but we're right
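# A sketch of the magic-number check the comment above alludes to (the byte
# signatures are standard ZIP magic numbers; the function name is illustrative).
# A real ZIP starts with a "PK" signature, so inspecting the first bytes avoids
# zipfile.is_zipfile's false positive on files that merely contain "PK\x05\x06".
ZIP_MAGIC_NUMBERS = [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"]

def demo_looks_like_zip(path) -> bool:
    with open(path, "rb") as f:
        head = f.read(4)
    return any(head.startswith(magic) for magic in ZIP_MAGIC_NUMBERS)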
| 654 | '''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = '''Hello, World!'''
_snake_case = '''en_XX'''
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCamelCase = Path('data_bin' )
UpperCamelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(_lowercase ) , bpe='sentencepiece' , sentencepiece_model=str(Path(_lowercase ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(_lowercase )
UpperCamelCase = xmod.model.encoder.sentence_encoder
UpperCamelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCamelCase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , _lowercase )
UpperCamelCase = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCamelCase = xmod_sent_encoder.embed_tokens.weight
UpperCamelCase = xmod_sent_encoder.embed_positions.weight
UpperCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCamelCase = xmod_sent_encoder.layernorm_embedding.weight
UpperCamelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCamelCase = model.roberta.encoder.layer[i]
UpperCamelCase = xmod_sent_encoder.layers[i]
# self attention
UpperCamelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
UpperCamelCase = xmod_layer.self_attn.q_proj.weight
UpperCamelCase = xmod_layer.self_attn.q_proj.bias
UpperCamelCase = xmod_layer.self_attn.k_proj.weight
UpperCamelCase = xmod_layer.self_attn.k_proj.bias
UpperCamelCase = xmod_layer.self_attn.v_proj.weight
UpperCamelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCamelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
UpperCamelCase = xmod_layer.self_attn.out_proj.weight
UpperCamelCase = xmod_layer.self_attn.out_proj.bias
UpperCamelCase = xmod_layer.self_attn_layer_norm.weight
UpperCamelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCamelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
UpperCamelCase = xmod_layer.fc1.weight
UpperCamelCase = xmod_layer.fc1.bias
# output
UpperCamelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
UpperCamelCase = xmod_layer.fc2.weight
UpperCamelCase = xmod_layer.fc2.bias
UpperCamelCase = xmod_layer.final_layer_norm.weight
UpperCamelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCamelCase = xmod_layer.adapter_layer_norm.weight
UpperCamelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCamelCase = bert_output.adapter_modules[lang_code]
UpperCamelCase = xmod_layer.adapter_modules[lang_code]
UpperCamelCase = from_adapter.fc1.weight
UpperCamelCase = from_adapter.fc1.bias
UpperCamelCase = from_adapter.fc2.weight
UpperCamelCase = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCamelCase = xmod_sent_encoder.layer_norm.weight
UpperCamelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCamelCase = xmod.model.classification_heads['mnli'].dense.weight
UpperCamelCase = xmod.model.classification_heads['mnli'].dense.bias
UpperCamelCase = xmod.model.classification_heads['mnli'].out_proj.weight
UpperCamelCase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
UpperCamelCase = xmod.model.encoder.lm_head.dense.weight
UpperCamelCase = xmod.model.encoder.lm_head.dense.bias
UpperCamelCase = xmod.model.encoder.lm_head.layer_norm.weight
UpperCamelCase = xmod.model.encoder.lm_head.layer_norm.bias
UpperCamelCase = xmod.model.encoder.lm_head.weight
UpperCamelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCamelCase = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
UpperCamelCase = model(_lowercase )[0]
if classification_head:
UpperCamelCase = xmod.model.classification_heads['mnli'](xmod.extract_features(_lowercase ) )
else:
UpperCamelCase = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
UpperCamelCase = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_snake_case = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
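# A tiny self-contained illustration of the parity check at the end of the
# conversion above: compare two tensors via the max absolute difference and
# torch.allclose with the same tolerance the script uses (all values here are
# synthetic).
import torch

ours = torch.randn(1, 11, 768)
theirs = ours + 1e-5 * torch.randn_like(ours)  # simulate tiny conversion noise
max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # ~1e-5
assert torch.allclose(ours, theirs, atol=1e-3)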
| 282 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
# setable values
SCREAMING_SNAKE_CASE_ : Optional[int] =None
SCREAMING_SNAKE_CASE_ : Optional[jnp.ndarray] =None
SCREAMING_SNAKE_CASE_ : Optional[jnp.ndarray] =None # sigma(t_i)
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] ):
"""simple docstring"""
return cls()
@dataclass
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : jnp.ndarray
SCREAMING_SNAKE_CASE_ : jnp.ndarray
SCREAMING_SNAKE_CASE_ : KarrasVeSchedulerState
class _lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1_00 , SCREAMING_SNAKE_CASE__ : float = 1.007 , SCREAMING_SNAKE_CASE__ : float = 80 , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 50 , ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self : int ):
"""simple docstring"""
return KarrasVeSchedulerState.create()
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple = () ):
"""simple docstring"""
UpperCamelCase = jnp.arange(0 , SCREAMING_SNAKE_CASE__ )[::-1].copy()
UpperCamelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE__ , schedule=jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa ) , timesteps=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : random.KeyArray , ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
UpperCamelCase = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCamelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCamelCase = random.split(SCREAMING_SNAKE_CASE__ , num=1 )
UpperCamelCase = self.config.s_noise * random.normal(key=SCREAMING_SNAKE_CASE__ , shape=sample.shape )
UpperCamelCase = sigma + gamma * sigma
UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_hat + sigma_hat * model_output
UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ , derivative=SCREAMING_SNAKE_CASE__ , state=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : jnp.ndarray , SCREAMING_SNAKE_CASE__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_prev + sigma_prev * model_output
UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ , derivative=SCREAMING_SNAKE_CASE__ , state=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KarrasVeSchedulerState , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
raise NotImplementedError()
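# A standalone sketch of the schedule built in set_timesteps above: the value
# sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) interpolates
# geometrically between sigma_max**2 (at i = 0) and sigma_min**2 (at i = N - 1);
# the scheduler evaluates it over its reversed timesteps.
import numpy as np

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 5
i = np.arange(num_inference_steps)
demo_schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
print(demo_schedule)  # [1.0e+04 1.41e+02 2.0e+00 2.83e-02 4.0e-04]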
| 282 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__a : str = logging.get_logger(__name__)
class A ( lowerCamelCase_ ):
def __init__( self : int , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Optional[int] ) -> None:
"""simple docstring"""
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
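# The same deprecation pattern in miniature: subclass the replacement and emit
# a warning on construction (the class names below are illustrative, not part
# of transformers).
import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)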
| 559 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def a_ ( __snake_case ) -> str:
'''simple docstring'''
UpperCamelCase_ = torch.load(__snake_case , map_location='cpu' )
if "model" in sd.keys():
UpperCamelCase_ = torch.load(__snake_case , map_location='cpu' )['model']
# pop unnecessary weights
UpperCamelCase_ = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(__snake_case )
UpperCamelCase_ = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase_ = sd.pop(__snake_case )
UpperCamelCase_ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCamelCase_ = sd[key]
# We split QKV in separate Q,K,V
UpperCamelCase_ = key.replace('.qkv_proj.' , '.q_proj.' )
UpperCamelCase_ = key.replace('.qkv_proj.' , '.k_proj.' )
UpperCamelCase_ = key.replace('.qkv_proj.' , '.v_proj.' )
UpperCamelCase_ = value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock`'s fused QKV weight is actually separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = torch.split(__snake_case , depth // 3 , dim=0 )
UpperCamelCase_ = q
UpperCamelCase_ = k
UpperCamelCase_ = v
del sd[key]
return sd
@torch.no_grad()
def a_ ( __snake_case , __snake_case , __snake_case=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase_ = load_checkpoint(__snake_case )
if config is not None:
UpperCamelCase_ = OPTConfig.from_pretrained(__snake_case )
else:
UpperCamelCase_ = OPTConfig()
UpperCamelCase_ = OPTModel(__snake_case ).half().eval()
model.load_state_dict(__snake_case )
# Check results
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
__a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__a : Union[str, Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
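# A small numeric check of the QKV split performed above: a fused qkv_proj
# weight of shape (3 * hidden, hidden) is cut into three equal blocks along
# dim 0. Per the script's comment, the fused layout may actually be K,V,Q
# rather than Q,K,V; the split mechanics are the same either way.
import torch

hidden = 4
fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
depth = fused.shape[0]
assert depth % 3 == 0
q, k, v = torch.split(fused, depth // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)
assert torch.equal(torch.cat([q, k, v], dim=0), fused)  # the split is lossless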
| 559 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
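# A minimal sketch (added for illustration, not part of the original test file)
# of the padding-mask rule used above: positions equal to `pad_token_id` get 0,
# everything else 1. The example ids are arbitrary.
def _demo_padding_mask(pad_token_id: int = 1) -> None:
    ids = tf.constant([[5, 7, pad_token_id, pad_token_id]])
    mask = tf.cast(tf.math.not_equal(ids, pad_token_id), tf.int8)
    assert mask.numpy().tolist() == [[1, 1, 0, 0]]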
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 304 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like low-rank adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
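    # Minimal usage sketch (illustrative, not part of the test file): wrapping an
    # existing linear layer so the frozen base weights and the trainable low-rank
    # adapter are summed in the forward pass. The layer sizes are arbitrary.
    def _demo_lora_wrap() -> nn.Module:
        base = nn.Linear(8, 8, bias=False)
        base.weight.requires_grad = False  # freeze the pretrained weight
        return LoRALayer(base, rank=2)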
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
    def tearDown(self):
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fpaa = self.model_fpaa.to(torch.float32)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4bitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
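# A compact sketch (illustrative, not part of the original tests) of the
# freeze-and-cast pattern from Step 1 above: freeze every parameter and keep
# 1-D params (e.g. layer norms) in fp32 for numerical stability while the rest
# of the model runs in lower precision.
def _demo_freeze_for_adapters(model):
    for param in model.parameters():
        param.requires_grad = False
        if param.ndim == 1:
            param.data = param.data.to(torch.float32)
    return model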
class Bnb4bitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 304 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPTaTokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model_export(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 713 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
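# Why the truncation above is needed (illustrative arithmetic, not part of the
# original script): with 2 processes and a 10-sample dataset, gather() on the
# last batch returns duplicated samples so every process sees an equally sized
# batch; the slice keeps only the first `len(dataset) - samples_seen` of them.
def _demo_last_batch_truncation() -> None:
    dataset_len, samples_seen = 10, 8
    gathered = list(range(4))  # hypothetical gathered predictions: 2 procs x batch of 2
    kept = gathered[: dataset_len - samples_seen]
    assert len(kept) == 2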
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main() | 678 | 0 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums, left, right):
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
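def _demo_find_max() -> None:
    """Usage sketch added for clarity (not in the original file); the doctests
    below also give `doctest.testmod()` something to exercise.

    >>> find_max([1, 3, 5, 7, 9, 2, 4, 6, 8, 10], 0, 9)
    10
    >>> find_max([-5.0, -2.5], 0, 1)
    -2.5
    """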
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 356 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
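# Tiny usage sketch (illustrative, not part of the original script): the four
# special symbols occupy the first indices; added words are appended and counted.
def _demo_dictionary() -> None:
    d = Dictionary()
    idx = d.add_symbol("hello", n=3)
    assert d[idx] == "hello" and "hello" in d and len(d) == 5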
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
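# Illustrative input/output pair for the rewrite above (added for clarity, not
# part of the original script).
def _demo_rewrite_dict_keys() -> None:
    src = {"le@@": 5, "er": 7, "<unk>": 3}
    assert rewrite_dict_keys(src) == {"le": 5, "er</w>": 7, "<unk>": 3}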
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 356 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
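# Minimal sketch (illustrative, not part of the original tests): `torch.allclose`
# with `atol=1e-3` tolerates sub-millesimal drift between implementations, as
# used in the slice comparisons above. The values are arbitrary.
def _demo_allclose() -> None:
    a = torch.tensor([0.1218, -0.0803])
    b = torch.tensor([0.1215, -0.0799])
    assert torch.allclose(a, b, atol=1e-3)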
| 720 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
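# A minimal sketch (illustrative, not part of the original tests) of the
# recursive tuple-vs-dict comparison above, applied to plain nested TF values.
def _demo_recursive_equal() -> None:
    a = (tf.constant([1.0]), (tf.constant([2.0]),))
    b = (tf.constant([1.0]), (tf.constant([2.0]),))
    flat = tf.nest.flatten(a), tf.nest.flatten(b)
    assert all(bool(tf.reduce_all(x == y)) for x, y in zip(*flat))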
def lowerCAmelCase_ ( ):
UpperCamelCase__: int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: int = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__: List[Any] = self.default_image_processor
UpperCamelCase__: Any = prepare_img()
UpperCamelCase__: Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
UpperCamelCase__: int = model(**__lowerCamelCase , training=__lowerCamelCase )
# verify the logits
UpperCamelCase__: List[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCamelCase__: int = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 )
| 221 | 0 |
"""Slack notification service for the transformers doc-test CI run."""

import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
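

# Illustration (hypothetical pytest summary line; the real input comes from the
# "stats" file inside the doc-test report artifact parsed below):
#   handle_test_results("= 2 failed, 98 passed in 1:02:03 =")
#   -> (2, 98, "1:02:03")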
def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False

    return failures
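

# Illustration (hypothetical `failures_short` excerpt): a "_ [doctest]" header line
# carries the failing test name as its third space-separated token, and the first
# following non-numbered line carries the error summary stored as its value:
#   _____ [doctest] transformers.models.x.modeling_x.XModel.forward _____
#   ValueError: some error message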
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by post(); post_reply() threads its messages under this timestamp.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # reads the module-level doc_test_results populated in the __main__ block below
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # payload is already a list of block dicts here, so it can be dumped directly
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
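

# The returned mapping is keyed by file stem. For the doc-test report artifact used
# below it looks like (values abbreviated):
#   {"stats": "...", "summary_short": "...", "failures_short": "..."}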
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 56 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
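
# With this pattern, `import transformers.models.autoformer` stays cheap: the
# torch-dependent modeling module is only imported on first attribute access.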
| 603 | 0 |
"""simple docstring"""
import functools
def __A ( a_ :str , a_ :str) -> int:
__a : Any = len(a_)
__a : Any = len(a_)
@functools.cache
def min_distance(a_ :int , a_ :int) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
__a : Optional[int] = int(worda[indexa] != worda[indexa]) # current letters not identical
return min(
1 + min_distance(indexa + 1 , a_) , 1 + min_distance(a_ , indexa + 1) , diff + min_distance(indexa + 1 , indexa + 1) , )
return min_distance(0 , 0)
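

# Example: the classic DP case - turning "kitten" into "sitting" takes 3 edits
# (substitute k->s, substitute e->i, insert g):
#   assert min_distance_up_bottom("kitten", "sitting") == 3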
if __name__ == "__main__":
import doctest
doctest.testmod() | 101 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` identifies this template; kept in asdict output even when left at the default
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text"} | 101 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 582 |
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
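

# e.g. floats_list((2, 3)) -> a 2x3 nested list of random floats in [0, scale),
# drawn from the module-level RNG unless a dedicated `rng` is passed in.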
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
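
    # The extractor is configured with do_normalize=True, so unpadded regions of the
    # output should be approximately zero-mean/unit-variance; the padding tests below
    # use this helper to verify that property per example.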
def __a ( self : Dict ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = [np.asarray(SCREAMING_SNAKE_CASE__ ) for speech_input in speech_inputs]
# Test not batched input
__a = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
__a = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# Test batched
__a = feat_extract(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
__a = feat_extract(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
def __a ( self : Dict ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = ["""longest""", """max_length""", """do_not_pad"""]
__a = [None, 1_6_0_0, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a = feat_extract(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __a ( self : List[str] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = range(8_0_0 , 1_4_0_0 , 2_0_0 )
__a = [floats_list((1, x) )[0] for x in lengths]
__a = ["""longest""", """max_length""", """do_not_pad"""]
__a = [None, 1_6_0_0, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a = feat_extract(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __a ( self : List[Any] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = feat_extract(
SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = feat_extract(
SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = feat_extract(
SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
__a = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def __a ( self : Any ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a = np.random.rand(1_0_0 ).astype(np.floataa )
__a = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self : Dict ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__a = [np.asarray(SCREAMING_SNAKE_CASE__ ) for speech_input in speech_inputs]
# Test feature size
__a = feature_extractor(audio_target=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__a = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
__a = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# Test batched
__a = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
__a = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__a = np.asarray(SCREAMING_SNAKE_CASE__ )
__a = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
__a = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
def __a ( self : Optional[Any] ):
'''simple docstring'''
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = self.feature_extraction_class(**self.feat_extract_dict )
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) for x, y in zip(SCREAMING_SNAKE_CASE__ , processed_features[input_name] ) ) )
__a = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE__ )
__a = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
__a = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self : Any ):
'''simple docstring'''
__a = self.feat_extract_tester.prepare_inputs_for_target(equal_length=SCREAMING_SNAKE_CASE__ )
__a = self.feature_extraction_class(**self.feat_extract_dict )
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
__a = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__a = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self : int ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
__a = feat_extract.num_mel_bins # hack!
__a = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding="""longest""" , return_tensors="""np""" )[input_name]
__a = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self : Any ):
'''simple docstring'''
__a = self.feat_extract_dict
__a = True
__a = self.feature_extraction_class(**SCREAMING_SNAKE_CASE__ )
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = [len(SCREAMING_SNAKE_CASE__ ) for x in speech_inputs]
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
__a = feat_extract.num_mel_bins # hack!
__a = feat_extract.pad(SCREAMING_SNAKE_CASE__ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE__ )
def __a ( self : Tuple ):
'''simple docstring'''
__a = self.feat_extract_dict
__a = True
__a = self.feature_extraction_class(**SCREAMING_SNAKE_CASE__ )
__a = self.feat_extract_tester.prepare_inputs_for_target()
__a = [len(SCREAMING_SNAKE_CASE__ ) for x in speech_inputs]
__a = feat_extract.model_input_names[0]
__a = BatchFeature({input_name: speech_inputs} )
__a = min(SCREAMING_SNAKE_CASE__ )
__a = feat_extract.num_mel_bins # hack!
__a = feat_extract.pad(
SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
from datasets import load_dataset
__a = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__a = ds.sort("""id""" ).select(range(SCREAMING_SNAKE_CASE__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __a ( self : List[str] ):
'''simple docstring'''
__a = torch.tensor(
[2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03,
3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03,
2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04,
4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03,
7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04,
4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] )
# fmt: on
__a = self._load_datasamples(1 )
__a = SpeechTaFeatureExtractor()
__a = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , SCREAMING_SNAKE_CASE__ , atol=1E-6 ) )
def __a ( self : Optional[int] ):
'''simple docstring'''
__a = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
__a = self._load_datasamples(1 )
__a = SpeechTaFeatureExtractor()
__a = feature_extractor(audio_target=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
| 582 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
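

# Typical round trip (sketch; assumes the checkpoint above is available locally
# or downloadable):
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("Paris is the capital of France.").input_ids
#   text = tokenizer.decode(ids)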
class BigBirdTokenizer(PreTrainedTokenizer):
    """
    Construct a BigBird tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is re-loaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 410 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate the unbalanced node's left subtree up (the left-left case)."""
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """A mirror symmetry rotation of right_rotation (the right-right case)."""
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    """Left rotation on the left child, then right rotation (the left-right case)."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    """Right rotation on the right child, then left rotation (the right-left case)."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
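

# Rebalancing cases after an insert (the standard AVL taxonomy, as implemented above):
#   left-left   -> right_rotation
#   left-right  -> lr_rotation (left_rotation on the left child, then right_rotation)
#   right-left  -> rl_rotation (right_rotation on the right child, then left_rotation)
#   right-right -> left_rotation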
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
| 410 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["PerceiverFeatureExtractor"]
__lowerCAmelCase = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
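

# Example invocation via fire (script name and file paths are placeholders):
#   python rouge_cli.py predicted_summaries.txt reference_summaries.txt --save_path rouge.json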
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 300 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
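

# Full determinism makes the pixel-level `expected_slice` assertions below reproducible
# across runs; fixed seeds alone are not sufficient for some nondeterministic kernels.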
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)

        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
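
    # In the full pipeline the (negative_)image_embeds come from the prior pipeline
    # (see the slow integration test below); the dummy inputs just use random tensors
    # of the right hidden width so this fast test stays lightweight.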
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 208 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 208 | 1 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: each pass bubbles the largest remaining element to
    the end, then recurses on the prefix that is one element shorter.

    >>> bubble_sort([0, 5, 2, 3, 2], 5)
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
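    # Added demonstration (not part of the original module): sorting a small
    # list in place with the recursive variant above.
    print(bubble_sort([5, 1, 4, 2, 8]))  # -> [1, 2, 4, 5, 8]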
| 517 |
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
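
# Added usage sketch: any object with a `process(sample) -> float` method
# satisfies the FilterType protocol. The one-tap identity filter below is a
# hypothetical stand-in for the IIR filters this module is normally used with;
# its impulse response has a flat 0 dB magnitude and zero phase shift.
if __name__ == "__main__":

    class IdentityFilter:
        def process(self, sample: float) -> float:
            # Pass the impulse through unchanged.
            return sample

    show_frequency_response(IdentityFilter(), samplerate=48_000)
    show_phase_response(IdentityFilter(), samplerate=48_000)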
| 597 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO checkpoint weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
snake_case_ : Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 166 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
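
# Added note (illustrative, not part of the original file): the helper maps
# requested pixel dimensions to MoVQ latent dimensions, rounding any partial
# scale_factor**2 window up. With the default scale_factor=8:
#   downscale_height_and_width(512, 512) -> (64, 64)
#   downscale_height_and_width(500, 770) -> (64, 104)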
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet conditioning.

    Args:
        scheduler ([`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 166 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 84 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
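    # Added sanity check on the sieve helper (not in the original file):
    # primes strictly below 20.
    print(prime_sieve(20))  # -> [2, 3, 5, 7, 11, 13, 17, 19]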
| 84 | 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        init_image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        init_image = init_image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": init_image,
            "control_image": control_image,
        }

        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class ControlNetImg2ImgMultiPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2

        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        init_image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        init_image = init_image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": init_image,
            "control_image": control_image,
        }

        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 719 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 0 |
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"

        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".

        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
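

# Added usage sketch (hypothetical module and attribute names): temporarily
# swap `os.path.join` inside `some_module` for a custom join, then restore it.
#
#     patcher = patch_submodule(some_module, "os.path.join", my_join)
#     patcher.start()   # some_module.os.path.join is now my_join
#     ...
#     patcher.stop()    # original binding restored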
| 97 |
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and calling `gc.collect()` and the
    relevant `empty_cache()`. Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
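

# Added usage sketch: drop the caller's references and free cached accelerator
# memory in one call, e.g. `model, optimizer = release_memory(model, optimizer)`.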
def should_reduce_batch_size(exception: Exception) -> bool:
    """
    Checks whether `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    A decorator that retries `function` with half the batch size each time it fails
    with an out-of-memory condition, starting from `starting_batch_size`.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
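

# Added usage sketch: the decorated function must take `batch_size` as its
# first argument; the wrapper injects it and halves it on every OOM retry.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size, model, optimizer):
#         ...
#
#     training_loop(model, optimizer)  # note: no batch_size passed by the caller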
| 536 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""Constructs a LeViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
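

# Added usage sketch (assumes PIL and a local image): resize so the short side
# becomes int(256/224 * 224) = 256, center-crop to 224x224, rescale, normalize.
#
#     processor = LevitImageProcessor()
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])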
| 712 | '''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """
    Returns True if graph is cyclic else False
    """
    # Keep track of visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
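    # Added demonstration (not part of the original module):
    # 0 -> 1 -> 2 -> 0 closes a cycle, so this prints True.
    print(check_cycle({0: [1], 1: [2], 2: [0]}))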
| 58 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
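
# Minimal usage sketch: the defaults correspond to the EfficientNet-B7 checkpoint above.
# config = EfficientNetConfig()
# config.num_hidden_layers  # sum(num_block_repeats) * 4 == 64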
| 223 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load datasets. Called after prepare_data."""
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
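
# Example invocation (script name, paths and data directory are illustrative):
# python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#     --data_dir ./glue_data/MRPC --output_dir ./results/mrpc --do_train --do_predict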
| 223 | 1 |
"""simple docstring"""
UpperCAmelCase_ : Tuple = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 717 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 165 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
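
# With a lazy module, the submodules registered above are only imported on first
# attribute access, keeping `import transformers` cheap; the TYPE_CHECKING branch
# below provides the real imports for static type checkers.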
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 271 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3) | 271 | 1 |
"""simple docstring"""
import numpy as np
def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Tuple , _lowerCamelCase: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = int(np.ceil((x_end - xa) / h ) )
__lowerCamelCase : Optional[int] = np.zeros((n + 1,) )
__lowerCamelCase : List[str] = ya
__lowerCamelCase : List[Any] = xa
for k in range(_lowerCamelCase ):
__lowerCamelCase : Dict = f(_lowerCamelCase , y[k] )
__lowerCamelCase : Optional[int] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCamelCase : Optional[int] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCamelCase : int = f(x + h , y[k] + h * ka )
__lowerCamelCase : int = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
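
# Example: integrate y' = y with y(0) = 1 over [0, 3]; the last entry approximates e**3:
# runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 3.0)[-1]  # ~= 20.0855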
if __name__ == "__main__":
import doctest
doctest.testmod() | 366 | """simple docstring"""
import os
def solution():
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution()) | 366 | 1 |
def circle_sort(collection: list) -> list:
    """Sort a list in place by repeatedly swapping mirrored out-of-order pairs."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
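
# Example: circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5] (the list is sorted in place).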
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 415 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the processor is expected to output."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def _a (self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ = json.loads(f.read() )
UpperCamelCase__ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
UpperCamelCase__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ = DetaImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ = image_processing(images=SCREAMING_SNAKE_CASE_ , annotations=SCREAMING_SNAKE_CASE_ , masks_path=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
# verify area
UpperCamelCase__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE_ ) )
# verify boxes
UpperCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# verify image_id
UpperCamelCase__ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE_ ) )
# verify is_crowd
UpperCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE_ ) )
# verify class_labels
UpperCamelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE_ ) )
# verify masks
UpperCamelCase__ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , SCREAMING_SNAKE_CASE_ )
# verify orig_size
UpperCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE_ ) )
# verify size
UpperCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE_ ) )
| 415 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : Dict ) -> Dict:
lowerCamelCase_ , lowerCamelCase_ = [], []
while len(_lowerCamelCase ) > 1:
lowerCamelCase_ , lowerCamelCase_ = min(_lowerCamelCase ), max(_lowerCamelCase )
start.append(_lowerCamelCase )
end.append(_lowerCamelCase )
collection.remove(_lowerCamelCase )
collection.remove(_lowerCamelCase )
end.reverse()
return start + collection + end
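
# Example: merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]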
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 137 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the square-spiral side length at which the diagonal prime ratio drops below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
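
# Project Euler 58: with the default ratio of 0.1 the loop runs until the prime density
# along the spiral's diagonals drops below 10% (the documented answer is 26241).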
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137 | 1 |
from manim import *
class Stage1(Scene):
    def construct(self):
        # NOTE: the arrange/align direction constants (UP, RIGHT, DOWN, LEFT) below are
        # reconstructed assumptions; shapes, copy counts and coordinates are verbatim.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key), Write(key_text))
        self.add(key)

        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 55 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 188 | 0 |
"""simple docstring"""
class lowerCAmelCase :
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
lowerCamelCase_ = name
lowerCamelCase_ = val
def __str__( self ) -> Any:
'''simple docstring'''
return F"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.val < other.val
class MinHeap:
    """Min-heap of Node objects with an index map so decrease_key works in O(log n)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node, starting from the last parent
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
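
# Complexity: peek is O(1); insert, remove and decrease_key are O(log n); build_heap is O(n).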
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod() | 66 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of an undirected weighted graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's MST algorithm with a linear-scan extract-min, O(V^2 + E)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Heap-based variant of Prim's MST (re-heapifies after each key decrease)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
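
# Usage sketch (ids and weights are illustrative; connect() takes 1-based ids):
# graph = [Vertex(i) for i in range(5)]
# connect(graph, 1, 2, 15); connect(graph, 1, 3, 12)
# connect(graph, 2, 4, 13); connect(graph, 2, 5, 5)
# prim(graph, graph[0])  # -> MST edges as (child, parent) pairs of 1-based ids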
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 66 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate the logical OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)
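
# Truth table: or_gate(0, 0) == 0; any input equal to 1 yields 1.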
def test_or_gate() -> None:
    """Check the OR gate truth table against the implementation."""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 87 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    """simple docstring"""

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize: int = 1024, imageDimSize: int = 768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """simple docstring"""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 383 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir: str) -> dict:
    results = {}
    path = os.path.join(output_dir, """all_results.json""")
    if os.path.exists(path):
        with open(path, """r""") as f:
            results = json.load(f)
    else:
        raise ValueError(f"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    """simple docstring"""

    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 383 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 311 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vit-base-patch16-224': 'https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """vit"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 311 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    """simple docstring"""
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    """simple docstring"""
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')

    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'

    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name

    return name
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # the fused qkv projection is split into separate query/key/value tensors;
            # the target key names below follow the HF Swin naming scheme
            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)

        print(f'''Saving image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''')
        model.push_to_hub(f'''microsoft/{model_name}''')
        image_processor.push_to_hub(f'''microsoft/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
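    # Example invocation (added for illustration; the script filename and the
    # checkpoint location are placeholders, not values from this repository):
    #   python convert_swin_simmim_checkpoint.py \
    #       --model_name swin-base-simmim-window6-192 \
    #       --checkpoint_path /path/to/simmim_pretrain__swin_base__img192_window6__100ep.pth \
    #       --pytorch_dump_folder_path ./swin-simmim-converted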
| 372 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F'''{module_identifier} is not a module.''')
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self):
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)

    def test_files_without_doctests(self):
        directory = Path('src/transformers')
        n_identifier = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_sources(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 372 | 1 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    '''simple docstring'''

    def test_diffusers_import(self):
        '''simple docstring'''
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_versions(self):
        '''simple docstring'''
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = 'k-diffusion'
                    elif backend == "invisible_watermark":
                        backend = 'invisible-watermark'
                    assert backend in deps, F"{backend} is not in the deps table!"
| 28 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError('Input value must be an \'int\' type')

    position = 0
    while number:
        position += 1
        number >>= 1
    return position
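# Added sanity checks (not part of the original module): 0b1000 has its
# highest set bit at position 4, and 0 has no set bits at all.
assert get_highest_set_bit_position(8) == 4
assert get_highest_set_bit_position(0) == 0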
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """simple docstring"""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
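# Added sanity check (not in the original module): the eighth expansion of the
# continued fraction for sqrt(2), 1393/985, is the first whose numerator has
# more digits than its denominator.
assert solution(8) == 1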
if __name__ == "__main__":
print(f"""{solution() = }""")
| 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 442 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        """simple docstring"""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
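# Illustrative usage (added; the values shown are simply the defaults above):
#   config = VivitConfig()
#   assert config.num_frames == 32 and config.tubelet_size == [2, 16, 16]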
| 0 | '''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase_ : Optional[Any] = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None) -> None:
    '''simple docstring'''
    require_version(deps[pkg], hint)
| 435 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1_348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 664 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, '''w''') as f:
            json.dump(duplicate_clusters, f)
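# Minimal usage sketch (added for illustration; the key follows the
# (index, repo_name, path) convention produced by _compute_min_hash below):
#
#   di = DuplicationIndex(duplication_jaccard_threshold=0.85)
#   min_hash = get_min_hash(sorted(get_tokens(source_code)))  # needs >= MIN_NUM_TOKENS tokens
#   if min_hash is not None:
#       di.add((0, "org/repo", "path/to/file.py"), min_hash)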
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=1_0_0_0_0),
            chunksize=1_0_0,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=1_0_0)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element['''base_index''']]['''copies''']

    print(f'''Original dataset size: {len(dataset)}''')
    print(f'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(f'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(f'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(f'''Filtered dataset size: {len(ds_filter)}''')

    return ds_filter, duplicate_clusters
| 664 | 1 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted


mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    'is :'
)
print(z)
| 34 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )

    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = '''Normal'''
    if result[0][0] == 1:
        prediction = '''Abnormality detected'''
| 40 | 0 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''')

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''')

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number)).strip('''-''')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
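# Added sanity checks (not part of the original module); values were worked
# through by hand from the functions above:
assert logical_left_shift(1, 1) == "0b10"
assert logical_right_shift(8, 2) == "0b10"
assert arithmetic_right_shift(-8, 2) == "0b11110"  # -8 >> 2 == -2 in 5-bit two's complement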
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='''Blip does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''')
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 148 | 1 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter:
    """simple docstring"""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs)}'''
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs)}'''
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
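# Minimal usage sketch (added; the coefficient values are illustrative
# placeholders, not a designed filter):
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -0.5, 0.25], [0.5, 0.5, 0.0])
    print([round(filt.process(x), 4) for x in (1.0, 0.0, 0.0)])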
| 571 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 100 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)

    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        '''simple docstring'''
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('''num_inference_steps''', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, '''set_timesteps'''):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, '''set_timesteps'''):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        '''simple docstring'''
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23_916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23_916) < 1e-3
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='''deis''', solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        '''simple docstring'''
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23_916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='''v_prediction''')
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 717 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    '''simple docstring'''
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
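
# Quick sanity check for the segmented sieve (a minimal sketch): compare
# against naive trial division on a small range.
def _is_prime_naive(k: int) -> bool:
    return k >= 2 and all(k % d for d in range(2, int(k**0.5) + 1))

assert sieve(100) == [p for p in range(2, 101) if _is_prime_naive(p)]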
| 411 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # `token_ids` must be a (possibly nested) list of plain Python ints.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint must not be a complete prefix of another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_with_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
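
# Standalone usage sketch mirroring the API exercised above (assumes torch is
# available): feeding token ids from either branch advances the constraint
# until one branch is fully matched.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[5, 6, 7], [5, 8]])
    for token in (5, 8):
        stepped, completed, reset = dc.update(token)
    print(completed, dc.current_seq)  # True [5, 8]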
| 89 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of random length (at most `max_length`); each pass yields an
    # unpredictable number of samples, which is what the shard tests need.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
        # Check the shards when the dataset is neither a round multiple of batch size
        # nor a multiple of num_processes batches.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a )
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
        # Check the shards when the dataset is neither a round multiple of batch size
        # nor a multiple of num_processes batches.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(observed) < len(reference):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
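
# Standalone sketch of the sharding behaviour tested above: two
# BatchSamplerShard views of one BatchSampler interleave its batches.
if __name__ == "__main__":
    sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # batches 0, 2, 4, ... of the underlying sampler
    print(list(shards[1]))  # batches 1, 3, 5, ...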
| 612 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
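
# Usage sketch for the lazy-module pattern above: the heavy submodule is only
# imported on first attribute access (kept commented here to avoid a circular
# import inside the package __init__; assumes transformers is installed):
# from transformers import ConvNextConfig
# config = ConvNextConfig()  # this access triggers the real import
# print(config.model_type)   # "convnext"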
| 27 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
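
# Worked example for the range test above (runnable once the helper is defined):
# ord("中") == 0x4E2D falls inside the main CJK block 0x4E00-0x9FFF.
assert _is_chinese_char(ord("中"))
assert not _is_chinese_char(ord("a"))  # 0x61 is outside every listed block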
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords starting with ##,
        # i.e. those that are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
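    # Example invocation (the script file name is hypothetical; the flags and
    # defaults are the ones declared above):
    #   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
    #       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt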
| 27 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extends TrainingArguments with the knobs used by the legacy seq2seq examples."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
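
# Usage sketch: these arguments parse like any TrainingArguments, e.g. via
# HfArgumentParser (kept commented; assumes transformers is installed):
# from transformers import HfArgumentParser
# (training_args,) = HfArgumentParser(Seq2SeqTrainingArguments).parse_args_into_dataclasses()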
| 401 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text-model outputs extended with a `projection_state` tensor."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict)
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | 189 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: int = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __lowerCamelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = AudioClassificationPipeline(model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
# test with a raw waveform
_lowerCAmelCase = np.zeros((34_000,) )
_lowerCAmelCase = np.zeros((14_000,) )
return audio_classifier, [audioa, audio]
def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = examples
_lowerCAmelCase = audio_classifier(UpperCAmelCase_ )
# by default a model is initialized with num_labels=2
self.assertEqual(
UpperCAmelCase_ , [
{'score': ANY(UpperCAmelCase_ ), 'label': ANY(UpperCAmelCase_ )},
{'score': ANY(UpperCAmelCase_ ), 'label': ANY(UpperCAmelCase_ )},
] , )
_lowerCAmelCase = audio_classifier(UpperCAmelCase_ , top_k=1 )
self.assertEqual(
UpperCAmelCase_ , [
{'score': ANY(UpperCAmelCase_ ), 'label': ANY(UpperCAmelCase_ )},
] , )
self.run_torchaudio(UpperCAmelCase_ )
@require_torchaudio
def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
import datasets
# test with a local file
_lowerCAmelCase = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
_lowerCAmelCase = dataset[0]['audio']['array']
_lowerCAmelCase = audio_classifier(UpperCAmelCase_ )
self.assertEqual(
UpperCAmelCase_ , [
{'score': ANY(UpperCAmelCase_ ), 'label': ANY(UpperCAmelCase_ )},
{'score': ANY(UpperCAmelCase_ ), 'label': ANY(UpperCAmelCase_ )},
] , )
@require_torch
def __lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_lowerCAmelCase = 'anton-l/wav2vec2-random-tiny-classifier'
_lowerCAmelCase = pipeline('audio-classification' , model=UpperCAmelCase_ )
_lowerCAmelCase = np.ones((8_000,) )
_lowerCAmelCase = audio_classifier(UpperCAmelCase_ , top_k=4 )
_lowerCAmelCase = [
{'score': 0.0842, 'label': 'no'},
{'score': 0.0838, 'label': 'up'},
{'score': 0.0837, 'label': 'go'},
{'score': 0.0834, 'label': 'right'},
]
_lowerCAmelCase = [
{'score': 0.0845, 'label': 'stop'},
{'score': 0.0844, 'label': 'on'},
{'score': 0.0841, 'label': 'right'},
{'score': 0.0834, 'label': 'left'},
]
self.assertIn(nested_simplify(UpperCAmelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase = {'array': np.ones((8_000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase = audio_classifier(UpperCAmelCase_ , top_k=4 )
self.assertIn(nested_simplify(UpperCAmelCase_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
import datasets
_lowerCAmelCase = 'superb/wav2vec2-base-superb-ks'
_lowerCAmelCase = pipeline('audio-classification' , model=UpperCAmelCase_ )
_lowerCAmelCase = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' )
        _lowerCAmelCase = np.array(dataset[3]['speech'] , dtype=np.float32 )
_lowerCAmelCase = audio_classifier(UpperCAmelCase_ , top_k=4 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=3 ) , [
{'score': 0.981, 'label': 'go'},
{'score': 0.007, 'label': 'up'},
{'score': 0.006, 'label': '_unknown_'},
{'score': 0.001, 'label': 'down'},
] , )
@require_tf
@unittest.skip('Audio classification is not implemented for TF' )
def __lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
| 491 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale with the ITU-R BT.601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
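
# Worked example of the luma formula above: a pure-red pixel (255, 0, 0) maps
# to 0.2989 * 255 + 0.5870 * 0 + 0.1140 * 0 ≈ 76.2 on the gray scale.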
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask (gray values above 127)."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image by the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image into the padded canvas, centered so the kernel anchor is its
    # middle cell (the exact offset was lost in this copy; centering is assumed)
    off_y, off_x = (kernel.shape[0] - 1) // 2, (kernel.shape[1] - 1) // 2
    image_padded[off_y : off_y + image.shape[0], off_x : off_x + image.shape[1]] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
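
# Minimal check for the dilation above: a single set pixel dilated with a
# 3x3 cross grows into exactly that cross shape.
_tiny = np.zeros((3, 3))
_tiny[1, 1] = 1
_cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
assert (dilation(_tiny, _cross) == _cross).all()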
if __name__ == "__main__":
# read original image
_snake_case = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
_snake_case = np.array(Image.open(lena_path))
# kernel to be applied
_snake_case = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_snake_case = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_snake_case = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
| 491 | 1 |
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}")
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    # The check letter encodes the 8-digit number modulo 23.
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
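    # Worked example: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
    # "12345678Z" validates while "12345678T" does not.
    assert is_spain_national_id("12345678Z")
    assert not is_spain_national_id("12345678T")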
| 382 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Attribute names follow diffusers' PipelineTesterMixin conventions.
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
return 32
@property
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
return 32
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return 1_00
@property
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : int = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        _lowercase : List[str] = UNet2DConditionModel(**UpperCamelCase )
return model
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.dummy_unet
_lowercase : Tuple = self.dummy_movq
_lowercase : Dict = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_lowercase : Dict = DDIMScheduler(**UpperCamelCase )
_lowercase : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=0 ):
"""simple docstring"""
_lowercase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_lowercase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase )
# create init_image
_lowercase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_lowercase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowercase : Tuple = Image.fromarray(np.uint8(UpperCamelCase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(UpperCamelCase ).startswith('''mps''' ):
_lowercase : List[Any] = torch.manual_seed(UpperCamelCase )
else:
_lowercase : List[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_lowercase : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : str = '''cpu'''
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**UpperCamelCase )
_lowercase : Dict = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_lowercase : Dict = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : List[Any] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
_lowercase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_lowercase : List[str] = '''A red cartoon frog, 4k'''
        _lowercase : List[str] = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(UpperCamelCase )
        _lowercase : List[str] = KandinskyV22Img2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
_lowercase : int = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
_lowercase : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
_lowercase , _lowercase : Optional[Any] = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_lowercase : str = pipeline(
image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
_lowercase : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) | 322 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCamelCase__ ( self : Optional[int] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Tuple ) -> Any:
__magic_name__: Dict = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__magic_name__: Any = [
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
]
return object_detector, examples
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Any , __snake_case : str ) -> Optional[int]:
__magic_name__: Dict = object_detector(examples[0] , threshold=0.0 )
__magic_name__: List[Any] = len(__snake_case )
self.assertGreater(__snake_case , 0 )
self.assertEqual(
__snake_case , [
{
"""score""": ANY(__snake_case ),
"""label""": ANY(__snake_case ),
"""box""": {"""xmin""": ANY(__snake_case ), """ymin""": ANY(__snake_case ), """xmax""": ANY(__snake_case ), """ymax""": ANY(__snake_case )},
}
for i in range(__snake_case )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def lowerCamelCase__ ( self : Any ) -> List[str]:
pass
@require_torch
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
__magic_name__: Optional[int] = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
__magic_name__: Optional[int] = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
] , )
__magic_name__: Tuple = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
]
] , )
@require_torch
@slow
def lowerCamelCase__ ( self : int ) -> Tuple:
__magic_name__: Optional[int] = pipeline("""zero-shot-object-detection""" )
__magic_name__: Union[str, Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
] , )
__magic_name__: Union[str, Any] = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def lowerCamelCase__ ( self : Dict ) -> Any:
pass
@require_torch
@slow
def lowerCamelCase__ ( self : Dict ) -> str:
__magic_name__: str = 0.2
__magic_name__: Union[str, Any] = pipeline("""zero-shot-object-detection""" )
__magic_name__: Dict = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
] , )
@require_torch
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
__magic_name__: Union[str, Any] = 2
__magic_name__: int = pipeline("""zero-shot-object-detection""" )
__magic_name__: List[Any] = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
] , )
| 213 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __A ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Dict = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__magic_name__: Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
] , )
__magic_name__: List[str] = text_generator.model.config.eos_token_id
__magic_name__: Dict = """<pad>"""
__magic_name__: Dict = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
[
{"""generated_token_ids""": ANY(__snake_case )},
{"""generated_token_ids""": ANY(__snake_case )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__: int = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__magic_name__: Optional[Any] = text_generator("""This is a test""" , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__magic_name__: Optional[int] = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline(self , model , tokenizer , processor ):
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
        prompt = """Hello I believe in"""
        text_generator = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        outputs = text_generator(prompt )
        self.assertEqual(
            outputs , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
        outputs = text_generator(prompt , stop_sequence=""" fe""" )
        self.assertEqual(outputs , [{"""generated_text""": """Hello I believe in fe"""}] )
    def run_pipeline_test(self , text_generator , _ ):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("""This is a test""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        outputs = text_generator("""This is a test""" , return_full_text=False )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        text_generator = pipeline(task="""text-generation""" , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator("""This is a test""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        outputs = text_generator("""This is a test""" , return_full_text=True )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        outputs = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True )
            self.assertEqual(
                outputs , [
                    [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                    [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                ] , )
        with self.assertRaises(ValueError ):
            outputs = text_generator("""test""" , return_full_text=True , return_text=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator("""test""" , return_full_text=True , return_tensors=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator("""test""" , return_text=True , return_tensors=True )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
            outputs = text_generator("""""" )
            self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
                outputs = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
            outputs = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
            with self.assertRaises(ValueError ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe("""This is a test""" )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        outputs = pipe("""This is a test""" )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        outputs = pipe("""This is a test""" )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> Any:
import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.float16 )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : Dict ) -> Any:
import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.float16 )
        pipe("""This is a test""" , do_sample=True , top_p=0.5 )
def lowerCamelCase__ ( self : List[str] ) -> Any:
        prompt = """Hello world"""
        text_generator = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("""transformers.generation.tf_utils""" )
        else:
            logger = logging.get_logger("""transformers.generation.utils""" )
        logger_msg = """Both `max_new_tokens`""" # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 )
        self.assertNotIn(logger_msg , cl.out )
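# Editor's note: a minimal usage sketch (not part of the original test file)
# of the pattern these tests exercise; the tiny checkpoint is the same one the
# tests above load.
def _example_text_generation_usage():
    from transformers import pipeline
    generator = pipeline("text-generation", model="sshleifer/tiny-ctrl")
    # `do_sample=False` forces greedy decoding, so repeated calls return the
    # same string -- which is why the tests above can assert on exact outputs.
    return generator("This is a test", do_sample=False, max_new_tokens=20)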
| 213 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase: Any ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCAmelCase: int ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCAmelCase: Any ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric ):
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=5_0_0 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_0_2_4 , divergence_curve_discretization_size=2_5 , mauve_scaling_factor=5 , verbose=True , seed=2_5 , ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
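# Editor's note: a minimal usage sketch (not part of the original file),
# mirroring the doctest in _KWARGS_DESCRIPTION above; requires `mauve-text`.
def _example_mauve_usage():
    import datasets
    mauve = datasets.load_metric("mauve")
    out = mauve.compute(
        predictions=["hello there", "general kenobi"],
        references=["hello there", "general kenobi"], )
    return out.mauve  # a float in [0, 1]; larger means P and Q are closer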
| 607 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self , input_ids , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class FuncNonContiguousArgs:
    def forward(self , input_ids , some_other_args , token_type_ids , attention_mask ):
        """simple docstring"""
        return None
class OnnxExportTestCase(unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """tf""" , 1_2 , **model_kwargs )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """pt""" , 1_2 , **model_kwargs )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
from transformers import BertModel
        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , """pt""" , 1_2 , tokenizer )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """tf""" , 1_2 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """pt""" , 1_2 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    def _test_export(self , model , framework , opset , tokenizer=None , **model_kwargs ):
        """simple docstring"""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model , tokenizer , """pt""" )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model , tokenizer , """tf""" )
    def _test_infer_dynamic_axis(self , model , tokenizer , framework ):
        """simple docstring"""
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
# Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
        tokens_list = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , tokens_list )
# Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
# Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(tokens_list ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , tokens_list )
# Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names ) , 1 )
        self.assertEqual(len(inputs_args ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 607 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler ):
    """simple docstring"""
    a: int = 0
    b: bool = False
    c: float = 3.0
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_kwargs_handler(self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
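        # Editor's note: `KwargsHandler.to_kwargs()` returns only the fields
        # whose values differ from the dataclass defaults, which is why
        # `MockClass().to_kwargs()` above is `{}` while each override shows up
        # as a single entry.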
@require_cuda
    def test_grad_scaler_kwargs_handler(self ):
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
    def test_ddp_kwargs(self ):
'''simple docstring'''
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_0_0, 2_0_0)
    model = accelerator.prepare(model)
# Check the values changed in kwargs
    error_msg = """"""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
 | 714 |
def apply_table(inp , table ):
    """Permute `inp` according to the 1-indexed positions in `table`."""
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data ):
    """Circular left shift of a bit-string by one position."""
    return data[1:] + data[0]
def xor(a , b ):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ''''''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s , data ):
    """S-box lookup: the outer bits select the row, the inner bits the column."""
    row = int('''0b''' + data[0] + data[-1] , 2 )
    col = int('''0b''' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function(expansion , s0 , s1 , key , message ):
    """One Feistel round of S-DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '''0''' * (2 - len(l )) + l  # noqa: E741
    r = '''0''' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
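# Editor's sketch (not in the original script): the helpers above are pure
# bit-string operations; a few concrete values, and a note that decryption in
# the main block below simply reuses `function` with the two round keys
# applied in reverse order (key2 first, then key1).
def _example_helpers():
    assert apply_table("abcd" , [2, 4, 3, 1] ) == "bdca"  # P4-style permutation
    assert left_shift("10000" ) == "00001"
    assert xor("1010" , "0110" ) == "1100"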
if __name__ == "__main__":
__a : str = input("""Enter 10 bit key: """)
__a : Optional[Any] = input("""Enter 8 bit message: """)
__a : Any = [6, 3, 7, 4, 8, 5, 1_0, 9]
__a : Tuple = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__a : Optional[int] = [2, 4, 3, 1]
__a : Dict = [2, 6, 3, 1, 4, 8, 5, 7]
__a : Any = [4, 1, 3, 5, 7, 2, 8, 6]
__a : int = [4, 1, 2, 3, 2, 3, 4, 1]
__a : Union[str, Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__a : Tuple = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__a : Dict = apply_table(key, paa_table)
__a : Any = temp[:5]
__a : Optional[int] = temp[5:]
__a : List[Any] = left_shift(left)
__a : Any = left_shift(right)
__a : List[Any] = apply_table(left + right, pa_table)
__a : Any = left_shift(left)
__a : Dict = left_shift(right)
__a : Tuple = left_shift(left)
__a : List[Any] = left_shift(right)
__a : Any = apply_table(left + right, pa_table)
# encryption
__a : Dict = apply_table(message, IP)
__a : Optional[Any] = function(expansion, sa, sa, keya, temp)
__a : int = temp[4:] + temp[:4]
__a : Optional[int] = function(expansion, sa, sa, keya, temp)
__a : List[Any] = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
__a : Optional[Any] = apply_table(CT, IP)
__a : List[str] = function(expansion, sa, sa, keya, temp)
__a : Tuple = temp[4:] + temp[:4]
__a : List[Any] = function(expansion, sa, sa, keya, temp)
__a : Any = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT) | 522 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys(s_dict ):
    """simple docstring"""
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , new_key )
        layer_to_block_of_layer = R'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
            s_dict.pop(key )
    return s_dict
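# Editor's sketch (not part of the conversion script): the core rename above
# maps T5X's flat `layers_N` names onto nested `block/N/layer` paths.
def _example_layer_rename():
    old = "encoder/layers_3/attention/query/kernel"
    assert re.sub(R"layers_(\d+)", R"block/\1/layer", old) == "encoder/block/3/layer/attention/query/kernel"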
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file , '''r''' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'''(.*) = ([0-9.]*)''' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '''.''' in value else int(value )
    activation = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params , sep='''/''' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
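# Editor's note: an illustrative invocation (the script name and paths are
# hypothetical; the flags match the argparse definitions below):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/config.gin \
#       --pytorch_dump_folder_path ./switch-converted --num_experts 8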
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 43 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 1_069, 11]]),
torch.tensor([[0, 0, 0, 1_069, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
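# Editor's note: a minimal sketch (not part of the original tests) of the call
# the assertions above exercise; the metadata keys match `self.metas`.
def _example_jukebox_tokenization():
    from transformers import JukeboxTokenizer
    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
    return len(tokens)  # one token tensor per prior level (3 above)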
| 87 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCamelCase = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        '''simple docstring'''
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/transformers/models/bert/modeling_bert.py') , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py') , )
    def tearDown(self ):
        '''simple docstring'''
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self , comment , class_name , class_code , overwrite_result=None ):
        '''simple docstring'''
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9)
        code = black.format_str(code , mode=mode)
        fname = os.path.join(self.transformer_dir , 'new_code.py')
        with open(fname , 'w' , newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True)
            with open(fname , 'r') as f:
                self.assertTrue(f.read() , expected)
    def test_find_code_in_transformers(self ):
        '''simple docstring'''
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code , REFERENCE_CODE)
    def test_copy_consistency(self ):
        '''simple docstring'''
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , REFERENCE_CODE) , )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , long_class_name , REFERENCE_CODE) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , REFERENCE_CODE , overwrite_result=re.sub('Bert' , 'TestModel' , REFERENCE_CODE) , )
    def test_convert_to_localized_md(self ):
        '''simple docstring'''
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list , converted_localized_md_list)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_localized_md_list , localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample)
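# Editor's note: an illustration (not in the original tests) of the comment
# convention `check_copies` enforces -- a body marked as copied must stay
# identical to its source, modulo the declared renames:
#
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#   class TestModelLMPredictionHead(nn.Module):
#       ...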
| 701 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook(self , m , inputs: Tensor , outputs: Tensor):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m , nn.Conv2d) or isinstance(m , nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__( self , x: Tensor):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [h.remove() for h in self.handles]
        return self
    @property
    def parametrized(self ):
        '''simple docstring'''
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0 , self.traced))
@dataclass
class ModuleTransfer:
    '''simple docstring'''
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    def __call__( self , x: Tensor):
        '''simple docstring'''
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip , src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip , dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""")
        for dest_m, src_m in zip(dest_traced , src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    """simple docstring"""
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f"""resnet{'-'.join(name.split('resnet' ) )}"""
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    """simple docstring"""
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
}
if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
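# Editor's note: an illustrative invocation (the script name is hypothetical;
# the flags match the argparse definitions above):
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet-converted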
| 454 | 0 |
"""simple docstring"""
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour of this vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and every vertex after it."""
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring of `graph` (adjacency matrix) with at most `max_colors` colors, or []."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
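# A minimal sanity check for the backtracking colorer above. The 4-vertex cycle
# graph (adjacency matrix) is an assumed example and is 2-colorable:
#
#   cycle = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
#   color(cycle, 2)  # -> [0, 1, 0, 1]
#   color(cycle, 1)  # -> [] (no valid 1-coloring exists)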
| 616 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file inside a dataset repo on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
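# Hedged usage sketch (the repo id and file path below are made-up examples):
# the helper builds the dataset "resolve" URL, quoting the path itself only on
# hfh < 0.11.0 because newer huggingface_hub versions url-encode it internally.
#
#   url = hf_hub_url("user/my-dataset", "data/train file.csv", revision="main")
#   # -> .../datasets/user/my-dataset/resolve/main/data/train%20file.csv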
| 616 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
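# The same two-step recipe (channels_last memory format + ipex.optimize) applies
# to any inference-only torch.nn.Module. A standalone sketch with an assumed toy
# model, not part of the original script:
#
#   model = torch.nn.Conv2d(3, 8, 3).eval().to(memory_format=torch.channels_last)
#   model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True)
#   with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
#       out = model(torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last))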
| 252 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89


def chain(number: int) -> bool:
    """Return True if `number`'s chain ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
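# Worked example of the chain logic above (digit squares summed at each step):
#   44 -> 4**2 + 4**2 = 32 -> 9 + 4 = 13 -> 1 + 9 = 10 -> 1      (chain ends at 1)
#   85 -> 64 + 25 = 89                                           (chain ends at 89)
# so next_number(44) == 32 and next_number(85) == 89.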
| 252 | 1 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort `numbers` in place (ascending) by pairwise exchanges and return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
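# Quick check of exchange_sort with assumed inputs:
#   exchange_sort([5, 4, 3, 2, 1])  # -> [1, 2, 3, 4, 5]
#   exchange_sort([-1, 9, 0, 2])    # -> [-1, 0, 2, 9]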
| 14 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, text_file, tmp_path):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
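# Hedged sketch of the API exercised above (paths are made up): with
# extract_compressed_file=True, cached_path returns the path of the extracted
# text file rather than the archive itself, reusing the cache on repeat calls.
#
#   download_config = DownloadConfig(cache_dir="/tmp/ds_cache", extract_compressed_file=True)
#   local_txt = cached_path("/data/corpus.txt.gz", download_config=download_config)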
| 588 | 0 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Split `number_of_bytes` into `partitions` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
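# Example split with assumed numbers: 100 bytes over 3 partitions; the last
# partition absorbs the remainder.
#   allocation_num(100, 3)  # -> ['1-33', '34-66', '67-100']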
| 720 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
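# For reference, the values returned by _compute above combine as
#   BLEU = BP * exp(sum_n w_n * log p_n),  with uniform weights w_n = 1/max_order,
# where p_n are the n-gram precisions and the brevity penalty is
#   BP = 1 if c > r else exp(1 - r/c)
# for total translation length c and effective reference length r
# (hence the length_ratio c/r reported alongside the score).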
| 119 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn='''gelu''', attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                '''KDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift='''scale_shift''', time_embedding_type='''fourier''', timestep_post_act='''gelu''', up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D'''), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
            ], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type='''sample''')
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='''quick_gelu''', projection_dim=512, )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_latent_upscale(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''', torch_dtype=torch.float16)
        pipe.to('''cuda''')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16)
        upscaler.to('''cuda''')
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt, generator=generator, output_type='''latent''').images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='''np''', ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''')
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16)
        upscaler.to('''cuda''')
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''')
        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='''np''', ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''')
        assert np.abs((expected_image - image).max()) < 5e-2
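# Condensed usage sketch of the pipeline exercised above (the prompt is made up;
# the checkpoints are the ones used in the slow tests):
#
#   pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
#   upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
#   latents = pipe("a red fox, highly detailed", output_type="latent").images
#   image = upscaler(prompt="a red fox, highly detailed", image=latents, num_inference_steps=20, guidance_scale=0).images[0]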
| 10 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 488 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
 | 717 |
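# Minimal hedged example for the config class above (the overridden values are
# illustrative only; every other field keeps its default):
#   config = RwkvConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
#   config.attention_hidden_size  # 64, defaults to hidden_size
#   config.intermediate_size      # 256, defaults to 4 * hidden_size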
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ] )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
 | 39 | 0 |
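# Shape walkthrough for the forward pass above (illustrative numbers): with
# num_frames=4 and hidden_states of shape (batch*frames, C, H, W) = (8, 32, 16, 16),
# the tensor is regrouped to (batch*H*W, frames, C) = (512, 4, 32) so each
# transformer block attends across the 4 frames at every spatial location, then
# it is projected back to (8, 32, 16, 16) and added to the residual.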
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
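# Note on the reported metric: with L the mean cross-entropy over all gathered
# batches, perplexity = exp(L); the OverflowError guard maps a divergent loss
# to float("inf").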
| 240 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
 | 147 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TvltImageProcessor and a TvltFeatureExtractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
 | 714 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) | 466 | 0 |
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
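    # Illustrative checks (added for clarity; 0b1010 is decimal 10):
    #   logical_left_shift(10, 3)      -> '0b1010000'
    #   logical_right_shift(10, 2)     -> '0b10'
    #   arithmetic_right_shift(-10, 2) -> '0b11101'  (5-bit two's complement of -3)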
| 543 |
"""simple docstring"""
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 1_00) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 2_00) + one_pound(x)


def solution(x: int = 2_00) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
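    # Worked example (added for clarity): each helper counts the ways to make x
    # pence using coins no larger than its own denomination, e.g.
    #   solution(5) == 4     -> {5p}, {2p,2p,1p}, {2p,1p,1p,1p}, {1p x 5}
    #   solution(200) == 73682, the answer to Project Euler problem 31.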
| 543 | 1 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True

        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
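    # Example (added for clarity): odd_even_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5].
    # Odd-even (brick) sort is a bubble-sort variant whose even- and odd-indexed
    # comparison passes are independent, which makes it easy to parallelise;
    # worst-case running time is O(n^2), like bubble sort.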
| 714 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
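# Illustrative usage (a sketch, not part of the original module; the local path
# "data.txt.gz" is hypothetical):
#
#   import fsspec
#   fsspec.register_implementation("gzip", GzipFileSystem, clobber=True)
#   # chained URL: read "data.txt" out of the gzip archive "data.txt.gz"
#   with fsspec.open("gzip://data.txt::data.txt.gz", "rb") as f:
#       text = f.read()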
| 129 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 512 |
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    """One step of Gaussian elimination: normalise each row by its leading term,
    subtract to cancel the first column, then recurse on the sub-matrix."""
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list) -> list:
    """Solve n linear equations given as n rows of n+1 numbers
    (coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
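    # Expected output (added for clarity): in the 5x5 system above each equation
    # says x_i + (sum of all variables) = 4 + i, so the sum is 5 and the solution
    # is [-1.0, 0.0, 1.0, 2.0, 3.0]; the single equation 4x = 2 gives [0.5].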
| 512 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
a = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
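    # e.g. gauss_easter(2023) -> 2023-04-09 00:00:00 (Easter Sunday fell on April 9, 2023)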
| 13 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
    max_seq_length: int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the test data."}
    )
    def __post_init__(self):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
            train_extension = self.train_file.split(".")[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
if data_args.pad_to_max_length:
        padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
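# Illustrative invocation (a sketch, not from the original file; the script
# name, model checkpoint and output path are assumptions):
#
#   python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact \
#     --do_train --do_eval \
#     --output_dir ./tapex-tabfact-out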
| 13 | 1 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
__snake_case = input('''Enter Video/IGTV url: ''').strip()
__snake_case = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Base class for all transformers-cli subcommands."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on the given argument parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
"""simple docstring"""
raise NotImplementedError() | 293 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
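# Illustrative usage (a sketch, not part of the original module): stop generation
# once the sequence reaches 20 tokens or 5 seconds have elapsed.
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#   )
#   # inside a generation loop, `input_ids` is the running (batch, seq_len)
#   # tensor and `scores` the latest logits; both are supplied by the caller:
#   if criteria(input_ids, scores):
#       ...  # stop generating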
| 715 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
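# Worked example of the construction above: a fairseq dict file lists one
# "<token> <count>" pair per line, and tokens are appended after the four
# special symbols (in-memory stand-in, purely illustrative).
_example_lines = ["the 1000", "of 500", "and 250"]
_example_words = [line.split(" ")[0] for line in _example_lines]
_example_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_example_vocab.update(dict(zip(_example_words, range(4, len(_example_words) + 4))))
# -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "of": 5, "and": 6}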
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
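# Example invocation (script name and all paths are placeholders):
# python convert_wav2vec2_seq2seq_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-2-speech2text2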
| 650 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
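# Why the aliasing above works, in miniature: pickle resolves classes by the
# module path recorded at dump time, looked up through sys.modules, so pointing
# the legacy module names at their new homes lets old pickles load
# (illustrative usage; the file name is a placeholder):
#
# with open("old_corpus.pkl", "rb") as fp:
#     corpus = pickle.load(fp, encoding="latin1")  # "data_utils.Corpus" now resolves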
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 683 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
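# Hedged usage sketch for the processor above (the checkpoint name is an
# assumption for illustration, not taken from this file):
#
# from transformers import AutoProcessor
# from PIL import Image
# processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.jpg"), return_tensors="pt")
# # inputs carries input_ids / attention_mask from the tokenizer plus
# # pixel_values merged in from the image processor, as in __call__ above.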
| 683 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class _UpperCAmelCase(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
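# The pattern above in miniature (names are illustrative, not the real
# transformers internals): a DummyObject-style metaclass turns any use of a
# placeholder class into a helpful error until the optional backends exist.
class _DummyMetaSketch(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires torch, transformers and onnx")


class _SomeOnnxPipelineSketch(metaclass=_DummyMetaSketch):
    def __init__(self, *args, **kwargs):
        raise ImportError("_SomeOnnxPipelineSketch requires torch, transformers and onnx")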
| 708 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( )-> str:
A__ = 1_0
A__ = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
A__ = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [9_7], '''text''': ['''1976''']}] * 1_0,
'''id''': list(range(UpperCamelCase_ ) ),
} , features=UpperCamelCase_ , )
return dataset
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int )-> Optional[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=UpperCamelCase_ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> List[Any]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
A__ = FILE_CONTENT
with open(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ )
return filename
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> Optional[Any]:
import bz2
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with bz2.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] )-> int:
import gzip
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with gzip.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> Any:
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with lz4.frame.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple )-> int:
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
with py7zr.SevenZipFile(UpperCamelCase_ , '''w''' ) as archive:
archive.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : int )-> Optional[Any]:
import tarfile
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(UpperCamelCase_ , '''w''' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> str:
import lzma
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with lzma.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] )-> List[str]:
import zipfile
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Tuple )-> str:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with zstd.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> int:
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
A__ = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( )-> str:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] )-> List[str]:
A__ = datasets.Dataset.from_dict(UpperCamelCase_ )
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> List[str]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
with contextlib.closing(sqlite3.connect(UpperCamelCase_ ) ) as con:
A__ = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
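# Read-back sketch for the sqlite fixture above (illustrative, not a fixture):
#
# with contextlib.closing(sqlite3.connect(path)) as con:
#     rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()
#     assert rows[0] == ("0", 0, 0.0)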
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] )-> Tuple:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(UpperCamelCase_ , '''w''' , newline='''''' ) as f:
A__ = csv.DictWriter(UpperCamelCase_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> List[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(UpperCamelCase_ , '''w''' , newline='''''' ) as f:
A__ = csv.DictWriter(UpperCamelCase_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] )-> List[str]:
import bz2
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(UpperCamelCase_ , '''rb''' ) as f:
A__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] )-> str:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Any )-> List[Any]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] )-> Tuple:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> Optional[int]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
A__ = pa.schema(
{
'''col_1''': pa.string(),
'''col_2''': pa.int64(),
'''col_3''': pa.float64(),
} )
with open(UpperCamelCase_ , '''wb''' ) as f:
A__ = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ )
A__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase_ ) )] for k in DATA[0]} , schema=UpperCamelCase_ )
writer.write_table(UpperCamelCase_ )
writer.close()
return path
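# Read-back sketch for the parquet fixture above (illustrative only):
#
# table = pq.read_table(path)
# assert table.column_names == ["col_1", "col_2", "col_3"]
# assert table.num_rows == len(DATA)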
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> str:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
A__ = {'''data''': DATA}
with open(UpperCamelCase_ , '''w''' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> Optional[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
A__ = {'''data''': DATA_DICT_OF_LISTS}
with open(UpperCamelCase_ , '''w''' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> List[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(UpperCamelCase_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> Tuple:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(UpperCamelCase_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> Optional[int]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(UpperCamelCase_ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> Union[str, Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(UpperCamelCase_ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str )-> List[str]:
import gzip
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(UpperCamelCase_ , '''rb''' ) as orig_file:
with gzip.open(UpperCamelCase_ , '''wb''' ) as zipped_file:
zipped_file.writelines(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] )-> List[Any]:
import gzip
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(UpperCamelCase_ , '''rb''' ) as orig_file:
with gzip.open(UpperCamelCase_ , '''wb''' ) as zipped_file:
zipped_file.writelines(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] )-> Optional[int]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] )-> Union[str, Any]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('''nested''' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any )-> Any:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : str )-> Tuple:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(UpperCamelCase_ , '''w''' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] )-> str:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(UpperCamelCase_ , '''w''' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.join('''nested''' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Any )-> List[str]:
A__ = ['''0''', '''1''', '''2''', '''3''']
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(UpperCamelCase_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> Dict:
A__ = ['''0''', '''1''', '''2''', '''3''']
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(UpperCamelCase_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] )-> Union[str, Any]:
A__ = ['''0''', '''1''', '''2''', '''3''']
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(UpperCamelCase_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] )-> List[str]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] )-> Tuple:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple )-> Any:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(UpperCamelCase_ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> Any:
A__ = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( )-> Tuple:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( )-> Dict:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] )-> List[Any]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] )-> str:
A__ = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 1_0 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 1_0 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
return data_dir
| 526 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
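# The dict above feeds the _LazyModule fallback at the bottom of this file. A toy
# version of that deferred-import trick (illustrative, not the real _LazyModule;
# the reverse index below is hypothetical):
#
# import importlib, types
# class _LazyStub(types.ModuleType):
#     def __getattr__(self, name):
#         submodule = _attr_to_submodule[name]  # e.g. "CanineModel" -> "modeling_canine"
#         module = importlib.import_module("." + submodule, self.__name__)
#         return getattr(module, name)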
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337 |
"""simple docstring"""
def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
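# Note on the list-based queue above: queue.pop(0) is O(n) per pop. A standard
# refinement of the same Kahn's algorithm (sketch only) uses collections.deque
# for O(1) pops from the left:
#
# from collections import deque
# queue = deque(i for i in range(len(graph)) if indegree[i] == 0)
# while queue:
#     vertex = queue.popleft()
#     ...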
# Adjacency List of Graph
__UpperCAmelCase ={0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 337 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
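# Illustrative use of the registry above (not part of the module; MyFormatter is
# a hypothetical Formatter subclass):
#
# formatter = get_formatter("np")     # alias resolves to the NumPy formatter
# formatter = get_formatter("torch")  # raises the registered error if torch is missing
# _register_formatter(MyFormatter, "mine", aliases=["m"])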
| 275 |
"""simple docstring"""
import torch
from torch import nn
class a ( nn.Module ):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
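# Shape walk-through for _compute_logit above (sizes are illustrative):
# hidden (B, d_proj=512) and proj (d_proj=512, d_embed=1024) give
# proj_hid = hidden @ proj of shape (B, 1024); with weight (V_i, 1024) and
# bias (V_i,), logit is (B, V_i): one score per token in cluster i.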
    def forward(self, hidden, labels=None, keep_order=False):
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase = hidden[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
_UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_UpperCAmelCase = labels != -100
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
if labels is None:
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
_UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
_UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
_UpperCAmelCase = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self, hidden):
if self.n_clusters == 0:
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase = self.out_layers[i].weight
_UpperCAmelCase = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__lowerCAmelCase ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
_UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase = logprob_i
return out
| 275 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class __UpperCamelCase(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 259 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 259 | 1 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        # keep spaces, and keep each alphabetic character only once
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
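# Round-trip sketch of the keyword cipher above (illustrative values):
#
#   >>> cipher_map = create_cipher_map("Goodbye!!")
#   >>> encipher("Hello World!!", cipher_map)
#   'CYJJM VMQJB!!'
#   >>> decipher("CYJJM VMQJB!!", cipher_map)
#   'HELLO WORLD!!'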
| 709 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
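# The spatial-shape check in `create_and_check_model` above is the standard
# convolution output-size formula. A minimal standalone sketch (the helper name
# is ours, not part of the test file):
def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2 * padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

# e.g. the first CvT stage (7x7 patch embedding, stride 4, padding 2) on a
# 64-pixel input: conv_output_size(64, 7, 4, 2) == 16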
| 387 | 0 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
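# Minimal usage sketch (illustrative values; WTQ-style fine-tuning commonly
# uses four aggregation operators):
#
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   config.aggregation_labels = {0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}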
| 583 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str):
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str):
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0):
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests():
    assert test_trie()


def main():
    words = "banana bananas bandanas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
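# Illustrative sketch of how `RadixNode.match` splits a stored prefix against a
# new word (values are ours, not from the original module):
#
#   >>> RadixNode("banana").match("bandana")
#   ('ban', 'ana', 'dana')   # (common prefix, rest of node prefix, rest of word)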
| 583 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
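# A custom handler is just a dataclass inheriting `KwargsHandler`, as `MockClass`
# shows above; `to_kwargs()` returns only the fields that differ from their
# defaults (sketch with illustrative values):
#
#   handler = MockClass(a=2, c=2.25)
#   handler.to_kwargs()  # -> {"a": 2, "c": 2.25}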
| 712 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
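# Sketch of the kind of custom `device_map` accepted above (hypothetical module
# names, keeping one module on the CPU in `torch_dtype`):
#
#   device_map = {
#       "transformer.word_embeddings": 0,
#       "transformer.h": 0,
#       "lm_head": "cpu",
#   }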
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
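# End-to-end usage sketch (assumed weights path; `MyModel` is a hypothetical
# model class, not part of this module):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()
#   quantized_model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )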
| 198 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
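# Minimal instantiation sketch (illustrative backbone-feature selection):
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])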
| 378 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
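# The integration test above amounts to greedy generation; a minimal usage
# sketch outside the test harness (prompt text is illustrative):
#
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))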
| 378 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = 'pytorch_model.bin'
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments pertaining to the training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('probability', reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(['label', 'probability'])
    dataset = dataset.rename_column('prediction', 'label')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"""train_pseudo.{args.data_file_extension}""")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training a pre-trained model on a downstream task."""
    # Initialize the accelerator; it handles device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
# Sanity checks
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Dict = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_lowerCAmelCase : List[Any] = args.train_file
_lowerCAmelCase : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_lowerCAmelCase : Union[str, Any] = args.eval_file
for key in data_files:
_lowerCAmelCase : Tuple = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_lowerCAmelCase : List[str] = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
_lowerCAmelCase : Tuple = f"""{args.output_dir}/self-train_iter-{{}}""".format
_lowerCAmelCase : str = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Any = False
# Show the progress bar
_lowerCAmelCase : Dict = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_lowerCAmelCase : Tuple = data_dir_format(__lowerCAmelCase )
assert os.path.exists(__lowerCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_lowerCAmelCase : Optional[int] = os.path.join(__lowerCAmelCase , 'stage-1' )
_lowerCAmelCase : List[Any] = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowerCAmelCase , __lowerCAmelCase ):
arguments_dict.update({key: value} )
_lowerCAmelCase : Tuple = os.path.join(__lowerCAmelCase , 'best-checkpoint' , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , __lowerCAmelCase , __lowerCAmelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , __lowerCAmelCase )
finetune(**__lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCAmelCase )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , __lowerCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_lowerCAmelCase : Tuple = os.path.join(__lowerCAmelCase , 'best-checkpoint' )
_lowerCAmelCase : Tuple = os.path.join(__lowerCAmelCase , 'stage-2' )
# Update arguments_dict
_lowerCAmelCase : Dict = model_path
_lowerCAmelCase : Optional[int] = data_files['train']
_lowerCAmelCase : Union[str, Any] = current_output_dir
_lowerCAmelCase : Optional[int] = os.path.join(__lowerCAmelCase , 'best-checkpoint' , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , __lowerCAmelCase , __lowerCAmelCase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , __lowerCAmelCase )
finetune(**__lowerCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowerCAmelCase )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , __lowerCAmelCase )
_lowerCAmelCase : int = iteration
_lowerCAmelCase : Dict = data_dir_format(iteration + 1 )
_lowerCAmelCase : int = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase , 'best-checkpoint' ) )
        _lowerCAmelCase : List[Any] = config.id2label
_lowerCAmelCase : Dict = os.path.join(__lowerCAmelCase , 'eval_results_best-checkpoint.json' )
_lowerCAmelCase : str = os.path.join(__lowerCAmelCase , 'test_results_best-checkpoint.json' )
assert os.path.exists(__lowerCAmelCase )
with open(__lowerCAmelCase , 'r' ) as f:
_lowerCAmelCase : List[str] = float(json.load(__lowerCAmelCase )[args.eval_metric] )
_lowerCAmelCase : int = os.path.join(__lowerCAmelCase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(__lowerCAmelCase )
# Loading the dataset from local csv or json files.
_lowerCAmelCase : int = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
_lowerCAmelCase : Optional[int] = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowerCAmelCase ):
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowerCAmelCase : Tuple = eval_result
if best_iteration is None:
_lowerCAmelCase : Any = new_iteration
_lowerCAmelCase : Any = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowerCAmelCase : Tuple = new_iteration
_lowerCAmelCase : List[str] = new_eval_result
_lowerCAmelCase : List[Any] = 0
else:
if new_eval_result == best_eval_result:
_lowerCAmelCase : Optional[Any] = new_iteration
_lowerCAmelCase : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowerCAmelCase : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , __lowerCAmelCase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowerCAmelCase , 'eval_results_best-iteration.json' ) , ) | 709 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
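    # A local, single-process sketch of the guarantee checked above: padding makes
    # uneven per-rank tensors the same size along dim 0, filling with zeros.
    # `fake_ranks` is illustrative only and not part of the accelerate API.
    fake_ranks = [torch.ones(2, 10), torch.ones(3, 10)]
    max_len = max(t.shape[0] for t in fake_ranks)
    padded = [torch.nn.functional.pad(t, (0, 0, 0, max_len - t.shape[0])) for t in fake_ranks]
    assert all(t.shape == (3, 10) for t in padded)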
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    r"""Configuration class for a timm backbone wrapped as a `TimmBackbone`."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
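

# A minimal usage sketch (illustrative values): configure a timm backbone and
# read back the normalized `out_indices`. "resnet18" is an example checkpoint
# name, not something mandated by this config class.
if __name__ == "__main__":
    config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3))
    print(config.backbone, config.out_indices)  # resnet18 (1, 2, 3)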
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for a RegNet trunk that mimics what vissl does, without the classifier head.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic that returns a function creating the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic that returns the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for (from_key, to_key) in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just
    # check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='x'),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='x'),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='x'),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='x'),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='x'),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='x'),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='x'),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='x'),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='x'),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='x'),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='x'),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='x'),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )

    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )

    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )

    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
            'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
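
    # A small sanity check for the `ModuleTransfer` idea used above: trace two
    # architecturally identical modules, copy weights layer by layer, and verify
    # the outputs match. The toy modules below are illustrative; the real script
    # transfers classy_vision/vissl RegNets into Hugging Face RegNet classes.
    # Kept commented out so the conversion entry point does not run extra code:
    #
    #   src = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4), nn.ReLU())
    #   dest = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4), nn.ReLU())
    #   x = torch.randn(1, 3, 16, 16)
    #   ModuleTransfer(src=src, dest=dest)(x)
    #   src.eval(); dest.eval()
    #   with torch.no_grad():
    #       assert torch.allclose(src(x), dest(x))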
import numpy as np
class IndexCalculation:
    """Calculate vegetation indices from red/green/blue/red-edge/NIR band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def _lowerCamelCase ( self ) -> Optional[int]:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def _lowerCamelCase ( self ) -> int:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _lowerCamelCase ( self ) -> Optional[int]:
return self.nir * (self.red / (self.green**2))
def _lowerCamelCase ( self ) -> Any:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
def _lowerCamelCase ( self ) -> str:
return (self.nir - self.blue) / (self.nir + self.blue)
def _lowerCamelCase ( self ) -> Union[str, Any]:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _lowerCamelCase ( self ) -> List[str]:
return (self.nir - self.green) / (self.nir + self.green)
def _lowerCamelCase ( self ) -> Any:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _lowerCamelCase ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _lowerCamelCase ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _lowerCamelCase ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
def _lowerCamelCase ( self ) -> str:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _lowerCamelCase ( self ) -> Dict:
return (self.nir / self.green) - 1
def _lowerCamelCase ( self ) -> str:
return (self.nir / self.redEdge) - 1
def _lowerCamelCase ( self ) -> Any:
return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def _lowerCamelCase ( self ) -> Any:
return self.nir - self.green
def _lowerCamelCase ( self ) -> str:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _lowerCamelCase ( self ) -> str:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
def _lowerCamelCase ( self ) -> Optional[int]:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _lowerCamelCase ( self ) -> str:
return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
def _lowerCamelCase ( self ) -> Optional[Any]:
return (self.rvi() - 1) / (self.rvi() + 1)
def _lowerCamelCase ( self ) -> Optional[int]:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _lowerCamelCase ( self ) -> Optional[int]:
return self.green / (self.nir + self.red + self.green)
def _lowerCamelCase ( self ) -> List[str]:
return self.nir / (self.nir + self.red + self.green)
def _lowerCamelCase ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _lowerCamelCase ( self ) -> Dict:
return (self.green - self.red) / (self.green + self.red)
def _lowerCamelCase ( self ) -> List[str]:
return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
def _lowerCamelCase ( self ) -> Optional[Any]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _lowerCamelCase ( self ) -> str:
return self.nir / self.red
def _lowerCamelCase ( self ) -> List[str]:
return (self.ndvi() + 0.5) ** (1 / 2)
def _lowerCamelCase ( self ) -> Any:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
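

# A minimal usage sketch: compute NDVI on toy reflectance arrays. The values are
# illustrative; real inputs would be per-pixel band matrices from imagery.
if __name__ == "__main__":
    red = np.array([[0.1, 0.2], [0.3, 0.4]])
    nir = np.array([[0.5, 0.6], [0.7, 0.8]])
    calc = IndexCalculation(red=red, nir=nir)
    print(calc.ndvi())  # (nir - red) / (nir + red), elementwise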
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (e.g. "A", "Z", "AB") to its 1-based
    column number, treating the title as a base-26 numeral.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
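    # Worked example (values chosen to be easy to verify by hand):
    # "A" -> 1, "Z" -> 26, "AA" -> 27, and "AB" -> 1 * 26 + 2 = 28.
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("Z") == 26
    assert excel_title_to_column("AA") == 27
    assert excel_title_to_column("AB") == 28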
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
__lowerCAmelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCAmelCase_, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowerCAmelCase = DataCollatorForWholeWordMask(
tokenizer=lowerCAmelCase_, mlm_probability=data_args.mlm_probability )
else:
__lowerCAmelCase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase_, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
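

# A minimal invocation sketch (paths and model name are illustrative, not from
# this repository):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file /path/to/train.txt \
#       --do_train \
#       --output_dir /tmp/clm-output
#
# Add `--mlm` (with a BERT-like checkpoint) to train with the masked-LM
# collator instead of causal language modeling.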
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    0/1 knapsack: return the maximum value attainable with capacity `max_weight`,
    considering items from `index` onward, where each item is taken at most once.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
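    # Worked example (illustrative numbers): with weights [1, 3, 4], values
    # [1, 4, 5] and capacity 7, the best choice is the last two items
    # (weight 3 + 4 = 7, value 4 + 5 = 9).
    assert knapsack([1, 3, 4], [1, 4, 5], 3, 7, 0) == 9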
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCAmelCase__ = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowerCAmelCase__ = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowerCAmelCase__ = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowerCAmelCase__ = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowerCAmelCase__ = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
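

# A quick numeric check of the unbiased pass@k estimator above, which computes
# 1 - C(n - c, k) / C(n, k). With n = 5 samples, c = 2 correct and k = 2:
# 1 - C(3, 2) / C(5, 2) = 1 - 3/10 = 0.7, matching the product form
# 1 - (1 - 2/4) * (1 - 2/5).
if __name__ == "__main__":
    print(estimate_pass_at_k(5, [2], 2))  # [0.7]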
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F"{test_file} instead." )
UpperCamelCase = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
UpperCamelCase = components[:-1] + [test_fn.replace(".py" , "" )]
UpperCamelCase = ".".join(_SCREAMING_SNAKE_CASE )
return test_module_path
def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from [test] classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the [test] classes that test them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make a (possibly nested) structure of classes JSON-serializable by using class names as strings."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
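# Usage sketch (illustrative, not part of the original module; the test-file path
# below is hypothetical):
#
#   mapping = get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))  # class names instead of raw class objects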
| 544 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
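# Usage sketch (illustrative, not part of the original module). Assuming the
# standard `transformers-cli` entry point wires up this subcommand:
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased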
| 403 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: compute the shortest path between two vertices in O(V + E)."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
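# Usage sketch (illustrative, not part of the original module):
#
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)  # weight-0 edge, expanded first
#   g.add_edge(1, 2, 1)
#   assert g.get_shortest_path(0, 2) == 1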
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
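# Usage sketch (illustrative, not part of the original module):
#
#   config = OpenAIGPTConfig(n_layer=6, n_head=8)
#   assert config.num_hidden_layers == 6  # resolved through `attribute_map`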
| 718 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
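# Example invocation (illustrative, not part of the original script; the script
# filename is hypothetical):
#
#   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub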
| 614 | 0 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
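# Example invocation (illustrative, not part of the original script; file names
# are hypothetical):
#
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm-pipeline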
| 24 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # embed the query and the support examples with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
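# Shape sketch (illustrative, not part of the original module): `W_query` and
# `W_supports` are tokenizer outputs (input_ids, attention_mask, ...), with
# `W_supports` additionally carrying the `sizes`, `start_token_id` and
# `end_token_id` entries consumed above; `p_starts`/`p_ends` score each support
# token as an entity start/end for the corresponding query.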
| 664 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log spam
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 146 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
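# Example invocation (illustrative, not part of the original script; the script
# filename is hypothetical):
#
#   python convert_blip_checkpoint.py --pytorch_dump_folder_path ./blip-base \
#       --config_path ./config.json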
| 146 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because
    # this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 313 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
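# Note (illustrative, not part of the original module): `slow_tokenizer_class`
# declares which slow tokenizer this fast class converts from, so
# `from_pretrained` can build the fast tokenizer out of the slow tokenizer's
# vocabulary files when no fast tokenizer file is available.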
| 313 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Placeholder for doctest-style checks of `greedy`."""
if __name__ == "__main__":
import doctest
doctest.testmod()
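# Usage sketch (illustrative, not part of the original module):
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   value = [80, 100, 60]
#   weight = [40, 10, 20]
#   foods = build_menu(food, value, weight)
#   chosen, total_value = greedy(foods, 60, Things.get_value)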
| 12 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """
    A small helper function for the recursion, mainly to have
    a clean interface for the solution() function below.
    """
    # if we are late 3 consecutive days or absent twice,
    # the string is no longer a prize string
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime

    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """
    Returns the number of possible prize strings for a particular number
    of days, using a simple recursive function with caching.
    """
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
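# Worked example (illustrative, not part of the original module): for 4-day
# strings there are 3**4 = 81 raw strings; removing the 33 with two or more
# absences and the 5 with a triple-late run leaves solution(4) == 43.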
| 12 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 393 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 393 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure the image processor can also be loaded from a legacy feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 325 |
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 325 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
snake_case_ = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_SCREAMING_SNAKE_CASE )
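
# A worked example (my addition): for [4, 1, 3, 2] the mean is 2.5 and the
# absolute deviations are 1.5, 1.5, 0.5, 0.5, so the function returns
# (1.5 + 1.5 + 0.5 + 0.5) / 4 == 1.0.
#
#     average_absolute_deviation([4, 1, 3, 2])  # -> 1.0
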
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'Input must be a string of 8 numbers plus letter'
__SCREAMING_SNAKE_CASE : Dict = 'TRWAGMYFPDXBNJZSQVHLCKE'
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = f"""Expected string as input, found {type(_SCREAMING_SNAKE_CASE ).__name__}"""
raise TypeError(_SCREAMING_SNAKE_CASE )
snake_case_ = spanish_id.replace("""-""" , """""" ).upper()
if len(_SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(_SCREAMING_SNAKE_CASE )
try:
snake_case_ = int(spanish_id_clean[0:8] )
snake_case_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(_SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
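
# A quick usage sketch (my addition): "12345678Z" is the canonical example,
# since 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".
#
#     >>> is_spain_national_id("12345678Z")
#     True
#     >>> is_spain_national_id("12345678-Z")   # dashes are stripped
#     True
#     >>> is_spain_national_id("12345678A")    # wrong check letter
#     False
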
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 0 |
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
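
# A minimal usage sketch (my addition): the loader JIT-compiles the extension
# on first call and returns the Python module wrapping the custom op; the
# compiled artefact is cached by torch.utils.cpp_extension between calls.
# `ms_deform_attn_forward` is named only for illustration; check the compiled
# module for the exact symbols it exposes.
#
#     MSDA = load_cuda_kernels()
#     # output = MSDA.ms_deform_attn_forward(value, spatial_shapes, ...)
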
| 491 |
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch_embeddings = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch_embeddings, separator)
        np.testing.assert_array_equal(result, expected)
| 491 | 1 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """
    Configuration class for an X-MOD model; defaults follow facebook/xmod-base.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
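
# A minimal usage sketch (my addition, following the standard transformers
# config pattern; `XmodModel` is assumed to be importable from transformers):
#
#     from transformers import XmodConfig, XmodModel
#
#     config = XmodConfig()        # facebook/xmod-base style defaults
#     model = XmodModel(config)    # model with randomly initialised weights
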
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 715 |
class Graph:
    """
    Weighted undirected graph stored as adjacency dictionaries.
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct, as Boruvka's algorithm requires."""
        edges = self.get_edges()
        # drop the mirrored duplicate of each undirected edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Computes the minimum spanning tree of the graph using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # drop the mirrored duplicate of each undirected edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
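
# A minimal usage sketch (my addition, not part of the original module):
# build a small weighted graph and print its minimum spanning tree.
if __name__ == "__main__":
    g = Graph.build(
        vertices=["a", "b", "c", "d"],
        edges=[("a", "b", 1), ("b", "c", 2), ("c", "d", 3), ("d", "a", 4), ("a", "c", 5)],
    )
    g.distinct_weight()  # Boruvka assumes distinct edge weights
    print(Graph.boruvka_mst(g))
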
| 238 | 0 |
# Square-digit chains: every starting number eventually reaches 1 or 89.
# Pre-compute the digit-square sums for every 5-digit block.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """
    Returns the sum of the squares of the digits of ``number``.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 arrives at 1
CHAINS[57] = False  # the chain starting at 58 arrives at 89


def chain(number: int) -> bool:
    """
    Follows the chain from ``number`` until it reaches a known member; True
    means the chain arrives at 1, False that it arrives at 89.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """
    Returns how many starting numbers below ``number`` produce a chain that
    arrives at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
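
# A quick sanity check (my addition, not part of the original solution),
# using the two example chains from the problem statement:
#
#     44 -> 32 -> 13 -> 10 -> 1                                  (arrives at 1)
#     85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89   (arrives at 89)
def _sanity_check() -> None:
    assert next_number(44) == 32
    assert next_number(85) == 89
    assert chain(44) is True  # arrives at 1
    assert chain(85) is False  # arrives at 89
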
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 2 |
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1

    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 26 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class snake_case :
'''simple docstring'''
def __init__( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Dict = [2, 1, 2, -1]
__UpperCAmelCase : str = [1, 2, 3, 4]
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(self.first_signal )
__UpperCAmelCase : Union[str, Any] = len(self.second_signal )
__UpperCAmelCase : Union[str, Any] = max(__a , __a )
# create a zero matrix of max_length x max_length
__UpperCAmelCase : List[str] = [[0] * max_length for i in range(__a )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__a ):
__UpperCAmelCase : Any = deque(self.second_signal )
rotated_signal.rotate(__a )
for j, item in enumerate(__a ):
matrix[i][j] += item
# multiply the matrix with the first signal
__UpperCAmelCase : Tuple = np.matmul(np.transpose(__a ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__a , 2 ) for i in final_signal]
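
# A quick sanity check (my addition, not part of the original module): with
# the default signals [2, 1, 2, -1] and [1, 2, 3, 4], the matrix method
# yields the classic textbook result.
def _example() -> None:
    assert CircularConvolution().circular_convolution() == [10, 10, 6, 14]
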
if __name__ == "__main__":
    doctest.testmod()
| 720 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Dict = LxmertConfig.from_json_file(UpperCAmelCase_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__UpperCAmelCase : Tuple = LxmertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase_ )
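
# Example invocation (my addition; the script name and all paths are
# placeholders for illustration):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin
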
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 374 | 0 |