| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    """A distribution obtained by applying `x -> loc + scale * x` to a base distribution."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Project raw features into one tensor per distribution argument."""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wrap an arbitrary function as an nn.Module."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event contemplated by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value that is valid under the support of the distribution, used e.g. for padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer that maps the input to the distribution arguments."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert arguments to the right shape and domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive half-line via (x + sqrt(x^2 + 4)) / 2, a smooth softplus alternative."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrite the parent method: instead of scaling the samples, scale the
    # parameters (see the scaling property of the negative binomial's Gamma mixture).
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            logits += scale.log()
        return self._base_distribution((total_count, logits))
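# Usage sketch (editor's addition, not part of the original module; the feature
# width 32 and batch size 8 are arbitrary): project raw features to Student-T
# parameters, then sample from the resulting distribution.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    features = torch.randn(8, 32)
    df, loc, scale = projection(features)       # each of shape (8,)
    distr = output.distribution((df, loc, scale))
    print(distr.sample().shape)                 # torch.Size([8])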
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256,
        dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        scale_embedding=False, max_source_positions=1_500, max_target_positions=448,
        pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256,
        suppress_tokens=None, begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
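# Usage sketch (editor's addition, separate from the module above): WhisperConfig
# is exported at the top level of the transformers package; the values printed
# below are the signature defaults.
from transformers import WhisperConfig

config = WhisperConfig()
print(config.d_model, config.encoder_layers, config.max_source_positions)  # 256 6 1500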
def is_even(number: int) -> bool:
    # The lowest bit of an even number is always 0.
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic cluster config for the local machine, detecting available devices."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False

    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=(
            "Whether or not to use mixed precision training. "
            "Choose between FP16 and BF16 (bfloat16) training. "
            "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
        ),
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
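# Usage sketch (editor's addition): the helper above is also re-exported for use
# in scripts and notebooks, bypassing the interactive `accelerate config` wizard.
# Note this writes a config file to the default cache location if none exists.
from accelerate.utils import write_basic_config

write_basic_config(mixed_precision="fp16")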
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by throwing random darts at a square enclosing the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
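# Usage sketch (editor's addition, not in the original file): the estimates
# tighten roughly as O(1/sqrt(iterations)); 100_000 samples is an arbitrary choice.
if __name__ == "__main__":
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)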
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio byte payload with ffmpeg into a mono float32 waveform."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Read raw microphone data via ffmpeg in chunks of `chunk_length_s` seconds."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Read raw bytes from `iterator` and yield chunks of `chunk_len` with left/right strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper creating the generator of raw data read from ffmpeg's stdout."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
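# Usage sketch (editor's addition; "sample.mp3" is a placeholder path and ffmpeg
# must be on PATH): decode a file to a mono float32 array at 16 kHz.
if __name__ == "__main__":
    with open("sample.mp3", "rb") as f:
        audio = ffmpeg_read(f.read(), sampling_rate=16_000)
    print(audio.dtype, audio.shape)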
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Convert `InputExample`s to `InputFeatures` using the given tokenizer."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two implicant strings that differ in at most one position, replacing
    the differing bit with '_'; return False if they differ in more than one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge pairs of implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # The pair merges: mark both as covered and keep the merged form.
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether implicant `string1` covers minterm `string2` (they may differ
    only in the `count` positions where `string1` has '_')."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily
    cover the remaining minterms."""
    temp = []
    select = [0] * len(chart)
    # A minterm covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover: repeatedly take the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
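# Editor's sketch (not in the original file): one merge step of the algorithm.
if __name__ == "__main__":
    assert compare_string("0110", "0100") == "01_0"  # bit 2 generalized to '_'
    assert compare_string("0110", "1001") is False   # differs in more than one bit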
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""
Project Euler problem 57: in the continued-fraction expansions of sqrt(2),
count how many of the first `limit` fractions have a numerator with more
digits than the denominator.
"""


def solution(limit: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, limit + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
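# Worked check (editor's addition): the expansions run 3/2, 7/5, 17/12, 41/29, ...;
# 1393/985, the 8th, is the first whose numerator has more digits than its denominator.
if __name__ == "__main__":
    assert solution(8) == 1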
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all (not necessarily contiguous) non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best sum so far, extend it with num, or start over at num.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
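# Worked check (editor's addition): for [1, 2, 3, 4, -2] the best subsequence
# skips the -2 and sums the positives, giving 10.
if __name__ == "__main__":
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10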
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
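# Worked check (editor's addition): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
if __name__ == "__main__":
    assert solution(15) == 26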
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
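# Usage sketch (editor's addition, separate from the module above): a DPR encoder
# config with a non-default projection head on top of the 768-dim BERT outputs.
from transformers import DPRConfig

config = DPRConfig(projection_dim=128)
print(config.hidden_size, config.projection_dim)  # 768 128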
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
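# Usage sketch (editor's addition): compose a BLIP-2 config from default sub-configs
# (vision + Q-Former + an OPT text backbone); the Q-Former's cross-attention width
# is tied to the vision hidden size.
from transformers import Blip2Config

config = Blip2Config(num_query_tokens=32)
print(config.qformer_config.encoder_hidden_size)  # 1408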
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 695 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=True , UpperCAmelCase_="pt" ):
UpperCAmelCase : int = {'''add_prefix_space''': True} if isinstance(a_ , a_ ) and not line.startswith(' ' ) else {}
UpperCAmelCase : str = padding_side
return tokenizer(
[line] , max_length=a_ , padding='max_length' if pad_to_max_length else None , truncation=a_ , return_tensors=a_ , add_special_tokens=a_ , **a_ , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , ):
UpperCAmelCase : Tuple = input_ids.ne(a_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> dict:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
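# A hedged usage sketch (paths and checkpoint are illustrative, not from this file):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-base")
#   ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=128, max_target_length=32, type_path="val")
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)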
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs) -> None:
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info() -> dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction: str, ground_truth: str) -> float:
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
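# Worked example (editor's illustration): for prediction "The cat sat" vs reference
# "a cat sat down", normalize_answer strips the articles, leaving {"cat", "sat"} vs
# {"cat", "sat", "down"}; precision = 2/2, recall = 2/3, so f1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8.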
def exact_match_score(prediction: str, ground_truth: str) -> bool:
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix: str) -> bool:
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
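# A hedged composition sketch (editor's illustration): the top-level config nests the
# two sub-configs and copies its special token ids from the text side (see __init__ above).
#   cfg = PixaStructConfig()
#   cfg.pad_token_id == cfg.text_config.pad_token_id   # -> True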
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 714 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 695 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count the reversible numbers of the given length that can still be completed."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
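# Sanity check (from the classic problem statement): there are 120 reversible
# numbers below one thousand, so solution(3) should return 120.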
if __name__ == "__main__":
print(f'''{solution() = }''')
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two points."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity: dot(a, b) / (norm(a) * norm(b))."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
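# Quick sketch (toy numbers): cosine_similarity(np.array([1, 0]), np.array([0, 1]))
# is 0.0 for orthogonal vectors and 1.0 for parallel ones, e.g. [1, 2] vs [2, 4].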
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if all elements are equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase__ = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265, max_position_embeddings=1_024,
        encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True, activation_function="gelu",
        d_model=1_024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=0, scale_embedding=False,
        pad_token_id=0, eos_token_id=1, forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
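# Attribute-map sketch: `hidden_size` and `num_attention_heads` resolve through the
# properties above, so with the defaults PegasusConfig().hidden_size == PegasusConfig().d_model == 1024.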
| 718 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence, up to max_perimeter."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
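# First perimeters produced by the recurrence (hand-checked): starting from
# prev_value=1, value=2, the loop yields 16, 50, 196, ... (i=0 gives value 7, so 2*7 + 2 = 16).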
| 695 | 0 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class A_ ( UpperCamelCase_ ):
'''simple docstring'''
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : str = '''mock_framework'''
# Framework provided - return whatever the user provides
UpperCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase_ )
UpperCAmelCase : int = FeaturesManager.determine_framework(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase_ )
UpperCAmelCase : str = FeaturesManager.determine_framework(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase_ )
UpperCAmelCase : Tuple = FeaturesManager.determine_framework(lowercase_ )
self.assertEqual(lowercase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase_ )
UpperCAmelCase : Tuple = FeaturesManager.determine_framework(lowercase_ )
self.assertEqual(lowercase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowercase_ ):
UpperCAmelCase : List[str] = FeaturesManager.determine_framework(lowercase_ )
def UpperCAmelCase_ ( self : str ) -> str:
UpperCAmelCase : int = MagicMock(return_value=lowercase_ )
with patch('transformers.onnx.features.is_tf_available' , lowercase_ ):
UpperCAmelCase : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase : Tuple = MagicMock(return_value=lowercase_ )
with patch('transformers.onnx.features.is_torch_available' , lowercase_ ):
UpperCAmelCase : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase_ , self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase : Tuple = MagicMock(return_value=lowercase_ )
UpperCAmelCase : Union[str, Any] = MagicMock(return_value=lowercase_ )
with patch('transformers.onnx.features.is_tf_available' , lowercase_ ), patch(
'transformers.onnx.features.is_torch_available' , lowercase_ ):
UpperCAmelCase : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase_ , self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase : int = MagicMock(return_value=lowercase_ )
UpperCAmelCase : List[str] = MagicMock(return_value=lowercase_ )
with patch('transformers.onnx.features.is_tf_available' , lowercase_ ), patch(
'transformers.onnx.features.is_torch_available' , lowercase_ ):
with self.assertRaises(lowercase_ ):
UpperCAmelCase : str = FeaturesManager.determine_framework(self.test_model )
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
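# How the lazy module behaves (a sketch, assuming this file lives at
# transformers/models/longt5/__init__.py):
#   import transformers.models.longt5 as longt5   # cheap: nothing heavy imported yet
#   longt5.LongT5Config                           # first attribute access triggers the real import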
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = """bloom"""
UpperCAmelCase_ : Any = ["""past_key_values"""]
UpperCAmelCase_ : Dict = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : str , lowercase_ : Optional[Any]=250_880 , lowercase_ : str=64 , lowercase_ : Any=2 , lowercase_ : Tuple=8 , lowercase_ : Optional[int]=1E-5 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=True , lowercase_ : List[str]=1 , lowercase_ : Any=2 , lowercase_ : Optional[Any]=False , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[Any]=0.0 , lowercase_ : List[Any]=1 , lowercase_ : str=False , **lowercase_ : Union[str, Any] , ) -> str:
UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase : Any = kwargs.pop('n_embed' , lowercase_ )
UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
UpperCAmelCase : Dict = n_layer
UpperCAmelCase : Union[str, Any] = n_head
UpperCAmelCase : str = layer_norm_epsilon
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : List[str] = use_cache
UpperCAmelCase : int = pretraining_tp
UpperCAmelCase : Dict = apply_residual_connection_post_layernorm
UpperCAmelCase : Any = hidden_dropout
UpperCAmelCase : Optional[Any] = attention_dropout
UpperCAmelCase : str = bos_token_id
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : List[str] = slow_but_exact
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = version.parse("""1.12""" )
def __init__( self : Any , lowercase_ : PretrainedConfig , lowercase_ : str = "default" , lowercase_ : List[PatchingSpec] = None , lowercase_ : bool = False , ) -> Any:
super().__init__(lowercase_ , task=lowercase_ , patching_specs=lowercase_ , use_past=lowercase_ )
if not getattr(self._config , 'pad_token_id' , lowercase_ ):
# TODO: how to do that better?
UpperCAmelCase : Union[str, Any] = 0
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Any = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowercase_ , direction='inputs' , inverted_values_shape=lowercase_ )
UpperCAmelCase : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCAmelCase : Dict = {0: 'batch', 1: 'sequence'}
return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
@property
def UpperCAmelCase_ ( self : str ) -> float:
return 1E-3
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : "PreTrainedTokenizer" , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
UpperCAmelCase : Optional[int] = super(lowercase_ , self ).generate_dummy_inputs(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase : Union[str, Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Optional[int] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCAmelCase : Tuple = seqlen + 2
UpperCAmelCase : List[str] = self._config.hidden_size // self.num_attention_heads
UpperCAmelCase : Optional[Any] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCAmelCase : int = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCAmelCase : Any = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(self.num_layers )
]
UpperCAmelCase : Any = common_inputs['attention_mask']
if self.use_past:
UpperCAmelCase : Optional[int] = ordered_inputs['attention_mask'].dtype
UpperCAmelCase : List[str] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase_ ( self : List[Any] ) -> int:
return 13
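# Shape sketch for the dummy past_key_values built in the method above: with batch b,
# n_head h, head_dim d = hidden_size // h and past length L = seqlen + 2, each layer
# receives a key of shape (b * h, d, L) and a value of shape (b * h, L, d),
# matching BLOOM's fused attention layout.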
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
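# Worked default (an editor's check, assuming the upstream defaults of input_size=1 and
# no extra features): cardinality [0] gives embedding_dimension [min(50, 0)] = [0], so
# _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2.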
| 695 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = SwinConfig(image_size=1_92 )
if "base" in model_name:
UpperCAmelCase : Optional[int] = 6
UpperCAmelCase : str = 1_28
UpperCAmelCase : Union[str, Any] = (2, 2, 18, 2)
UpperCAmelCase : Union[str, Any] = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase : List[str] = 12
UpperCAmelCase : Tuple = 1_92
UpperCAmelCase : Dict = (2, 2, 18, 2)
UpperCAmelCase : List[Any] = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
UpperCAmelCase : List[Any] = window_size
UpperCAmelCase : Dict = embed_dim
UpperCAmelCase : int = depths
UpperCAmelCase : Union[str, Any] = num_heads
return config
def UpperCamelCase( UpperCAmelCase_ ):
if "encoder.mask_token" in name:
UpperCAmelCase : List[Any] = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase : str = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase : Any = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
if "attn.proj" in name:
UpperCAmelCase : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCAmelCase : int = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCAmelCase : Any = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCAmelCase : str = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCAmelCase : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCAmelCase : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
UpperCAmelCase : List[Any] = 'layernorm.weight'
if name == "encoder.norm.bias":
UpperCAmelCase : List[Any] = 'layernorm.bias'
if "decoder" in name:
pass
else:
UpperCAmelCase : Optional[Any] = 'swin.' + name
return name
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase : List[Any] = orig_state_dict.pop(UpperCAmelCase_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase : Optional[int] = key.split('.' )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : Any = int(key_split[4] )
UpperCAmelCase : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase : Any = val[:dim, :]
UpperCAmelCase : Optional[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Optional[int] = val[-dim:, :]
else:
UpperCAmelCase : List[Any] = val[
:dim
]
UpperCAmelCase : Optional[Any] = val[
dim : dim * 2
]
UpperCAmelCase : List[Any] = val[
-dim:
]
else:
UpperCAmelCase : Optional[int] = val
return orig_state_dict
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )['model']
UpperCAmelCase : List[str] = get_swin_config(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = SwinForMaskedImageModeling(UpperCAmelCase_ )
model.eval()
UpperCAmelCase : List[str] = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
UpperCAmelCase : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCAmelCase : Tuple = ViTImageProcessor(size={'height': 1_92, 'width': 1_92} )
UpperCAmelCase : Dict = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt' )
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase_ ).logits
print(outputs.keys() )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 700 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all character n-grams of the given size from sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
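# e.g. create_ngram("abcd", 2) -> ["ab", "bc", "cd"]  (character-level, not word-level)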
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order low-pass filter (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order peaking (bell) EQ filter with `gain_db` of boost/cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a second-order high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
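# Minimal usage sketch for the filter factories above. It assumes IIRFilter (imported
# from audio_filters.iir_filter) exposes a per-sample `process(sample: float) -> float`
# method; the frequency and sample values below are illustrative.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=440, samplerate=48_000)
    samples = [0.0, 0.5, 1.0, 0.5, 0.0, -0.5, -1.0, -0.5]
    print([lowpass.process(sample) for sample in samples])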
| 695 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = ["""pixel_values"""]
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : str , ) -> None:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = size if size is not None else {'shortest_edge': 384}
UpperCAmelCase : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase : str = do_resize
UpperCAmelCase : str = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase : Optional[int] = crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase : Union[str, Any] = resample
UpperCAmelCase : Optional[int] = do_rescale
UpperCAmelCase : Dict = rescale_factor
UpperCAmelCase : str = do_normalize
UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : float , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ) -> np.ndarray:
UpperCAmelCase : List[str] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
UpperCAmelCase : Optional[int] = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase : List[Any] = int(shortest_edge / crop_pct )
UpperCAmelCase : Optional[int] = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase : List[str] = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : int = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
UpperCAmelCase : Optional[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase : str = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase : int = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase : str = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
UpperCAmelCase : List[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase : str = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
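# Usage sketch (comment-only, since the class above keeps the corpus' obfuscated
# parameter names; upstream this is ConvNeXt-style preprocessing and the last method
# corresponds to the `preprocess` entry point):
#
#   from PIL import Image
#   image = Image.open("example.jpg")           # hypothetical input file
#   processor = A_(size={"shortest_edge": 384})
#   batch = processor.preprocess(image, return_tensors="np")
#   print(batch["pixel_values"].shape)          # e.g. (1, 3, 384, 384)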
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """
    Least-recently-used cache: evicts the least recently referenced key first.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            # Evict the least recently used key when the cache is full.
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
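# Expected input shape (illustrative): the JSON maps each benchmark file to metric
# entries of the form {"new": float, "old": float, "diff": float}, e.g.
#
#   {
#     "benchmarks/benchmark_array_xd.json": {
#       "read_batch": {"new": 0.22, "old": 0.21, "diff": 0.01}
#     }
#   }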
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs) -> None:
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds input ids for a conversation by concatenating turns, each terminated by EOS."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
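# Usage sketch (assumes network access to the Hugging Face Hub; the checkpoint name
# comes from the PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world").input_ids
#   print(tokenizer.decode(ids))  # -> "Hello world"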
| 695 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self) -> None:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self) -> None:
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead", REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead", REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead", REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
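# Usage sketch (comment-only: PipelineTool instances are assumed callable, chaining
# encode -> forward -> decode; `audio` is a waveform array or file accepted by
# WhisperProcessor):
#
#   transcriber = SpeechToTextTool()
#   print(transcriber(audio))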
| 695 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1_500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256,
        eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
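# Usage sketch: build a config and inspect the ONNX input spec (names follow the
# restored upstream API; no model weights are involved):
#
#   config = WhisperConfig(encoder_layers=2, decoder_layers=2, d_model=128)
#   onnx_config = WhisperOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with "input_features" and "decoder_input_ids"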
| 695 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=10_24, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
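# Example invocation (the script file name is illustrative):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path bart_beam_search.onnx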
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
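# Usage sketch: programmatic equivalent of `accelerate config default`
# (the save path below is hypothetical):
#
#   write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")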
| 695 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase__ = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """deformable_detr"""
UpperCAmelCase_ : str = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=True , lowercase_ : str=None , lowercase_ : int=3 , lowercase_ : List[Any]=300 , lowercase_ : List[str]=1_024 , lowercase_ : Tuple=6 , lowercase_ : Union[str, Any]=1_024 , lowercase_ : Optional[Any]=8 , lowercase_ : List[Any]=6 , lowercase_ : Any=1_024 , lowercase_ : List[Any]=8 , lowercase_ : Tuple=0.0 , lowercase_ : int=True , lowercase_ : int="relu" , lowercase_ : Union[str, Any]=256 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : Optional[Any]=1.0 , lowercase_ : Optional[int]=True , lowercase_ : List[str]=False , lowercase_ : Any="sine" , lowercase_ : Dict="resnet50" , lowercase_ : List[str]=True , lowercase_ : Dict=False , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=300 , lowercase_ : Tuple=False , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[Any]=1 , lowercase_ : Dict=1 , lowercase_ : Dict=5 , lowercase_ : str=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[str]=0.25 , lowercase_ : Union[str, Any]=False , **lowercase_ : List[str] , ) -> List[str]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
UpperCAmelCase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[int] = backbone_config.get('model_type' )
UpperCAmelCase : Any = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : List[str] = config_class.from_dict(lowercase_ )
UpperCAmelCase : Any = use_timm_backbone
UpperCAmelCase : int = backbone_config
UpperCAmelCase : Any = num_channels
UpperCAmelCase : Optional[int] = num_queries
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = d_model
UpperCAmelCase : Any = encoder_ffn_dim
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : Any = encoder_attention_heads
UpperCAmelCase : Optional[int] = decoder_ffn_dim
UpperCAmelCase : Union[str, Any] = decoder_layers
UpperCAmelCase : List[Any] = decoder_attention_heads
UpperCAmelCase : Any = dropout
UpperCAmelCase : Dict = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[Any] = init_std
UpperCAmelCase : Union[str, Any] = init_xavier_std
UpperCAmelCase : Optional[Any] = encoder_layerdrop
UpperCAmelCase : Tuple = auxiliary_loss
UpperCAmelCase : Tuple = position_embedding_type
UpperCAmelCase : Tuple = backbone
UpperCAmelCase : Optional[int] = use_pretrained_backbone
UpperCAmelCase : Tuple = dilation
# deformable attributes
UpperCAmelCase : Union[str, Any] = num_feature_levels
UpperCAmelCase : Optional[int] = encoder_n_points
UpperCAmelCase : str = decoder_n_points
UpperCAmelCase : str = two_stage
UpperCAmelCase : int = two_stage_num_proposals
UpperCAmelCase : Tuple = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
UpperCAmelCase : List[str] = class_cost
UpperCAmelCase : List[str] = bbox_cost
UpperCAmelCase : str = giou_cost
# Loss coefficients
UpperCAmelCase : List[str] = mask_loss_coefficient
UpperCAmelCase : Union[str, Any] = dice_loss_coefficient
UpperCAmelCase : List[str] = bbox_loss_coefficient
UpperCAmelCase : List[str] = giou_loss_coefficient
UpperCAmelCase : Union[str, Any] = eos_coefficient
UpperCAmelCase : Optional[int] = focal_alpha
UpperCAmelCase : Union[str, Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
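# Usage sketch (comment-only: this class corresponds to upstream DeformableDetrConfig,
# and the attribute names below follow that API; the obfuscated __init__ above is left
# as-is, so treat this strictly as an illustration):
#
#   config = DeformableDetrConfig(num_queries=300, two_stage=False)
#   print(config.num_attention_heads, config.hidden_size)  # via the properties above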
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 695 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase__ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """maskformer"""
UpperCAmelCase_ : Optional[Any] = {"""hidden_size""": """mask_feature_size"""}
UpperCAmelCase_ : Optional[int] = ["""resnet""", """swin"""]
UpperCAmelCase_ : Optional[Any] = ["""detr"""]
def __init__( self : str , lowercase_ : int = 256 , lowercase_ : int = 256 , lowercase_ : float = 0.1 , lowercase_ : bool = False , lowercase_ : Optional[Dict] = None , lowercase_ : Optional[Dict] = None , lowercase_ : float = 0.02 , lowercase_ : float = 1.0 , lowercase_ : float = 1.0 , lowercase_ : float = 1.0 , lowercase_ : float = 20.0 , lowercase_ : Optional[bool] = None , **lowercase_ : Optional[Any] , ) -> Tuple:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase : Tuple = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[int] = backbone_config.pop('model_type' )
UpperCAmelCase : Tuple = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : Dict = config_class.from_dict(lowercase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase : List[Any] = (
decoder_config.pop('model_type' ) if isinstance(lowercase_ , lowercase_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Dict = CONFIG_MAPPING[decoder_type]
UpperCAmelCase : List[str] = config_class.from_dict(lowercase_ )
UpperCAmelCase : Any = backbone_config
UpperCAmelCase : Optional[int] = decoder_config
# main feature dimension for the model
UpperCAmelCase : str = fpn_feature_size
UpperCAmelCase : List[Any] = mask_feature_size
# initializer
UpperCAmelCase : Tuple = init_std
UpperCAmelCase : int = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase : str = cross_entropy_weight
UpperCAmelCase : Union[str, Any] = dice_weight
UpperCAmelCase : Any = mask_weight
UpperCAmelCase : Tuple = use_auxiliary_loss
UpperCAmelCase : Any = no_object_weight
UpperCAmelCase : str = output_auxiliary_logits
UpperCAmelCase : Tuple = self.decoder_config.encoder_attention_heads
UpperCAmelCase : int = self.decoder_config.num_hidden_layers
super().__init__(**lowercase_ )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] , lowercase_ : PretrainedConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] ) -> int:
return cls(
backbone_config=lowercase_ , decoder_config=lowercase_ , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict[str, any]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[Any] = self.backbone_config.to_dict()
UpperCAmelCase : Dict = self.decoder_config.to_dict()
UpperCAmelCase : Optional[int] = self.__class__.model_type
return output
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
            # HACK: label indices are swapped in the RoBERTa pretrained model
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
            # HACK: label indices are swapped in the RoBERTa pretrained model
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
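# Converts HANS InputExamples into model-ready InputFeatures: each premise/hypothesis pair is
# tokenized to a fixed maximum length, the string label is mapped to its index, and the example's
# pairID is preserved so predictions can be scored against the heuristic categories later.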
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
| 695 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = LongformerTokenizer
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : List[Any] = LongformerTokenizerFast
UpperCAmelCase_ : int = True
def UpperCAmelCase_ ( self : Any ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
UpperCAmelCase : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase : Optional[Any] = {'unk_token': '<unk>'}
UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase_ ) )
def UpperCAmelCase_ ( self : Any , **lowercase_ : List[str] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase_ ( self : Tuple , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : Optional[int] ) -> List[str]:
UpperCAmelCase : Dict = 'lower newer'
UpperCAmelCase : Any = 'lower newer'
return input_text, output_text
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Tuple = 'lower newer'
UpperCAmelCase : Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCAmelCase : Optional[Any] = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True)
self.assertListEqual(lowercase_ , lowercase_ )
UpperCAmelCase : List[Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowercase_ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowercase_ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : int = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
UpperCAmelCase : Dict = tokenizer.encode('sequence builders' , add_special_tokens=lowercase_ )
UpperCAmelCase : str = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase_ )
UpperCAmelCase : str = tokenizer.encode(
'sequence builders' , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
UpperCAmelCase : Any = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : Dict = self.get_tokenizer()
UpperCAmelCase : List[Any] = 'Encode this sequence.'
UpperCAmelCase : List[Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
UpperCAmelCase : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
UpperCAmelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase_ , lowercase_ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
UpperCAmelCase : Optional[Any] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
# Testing spaces after special tokens
UpperCAmelCase : Any = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space
UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ )
UpperCAmelCase : int = 'Encode <mask> sequence'
UpperCAmelCase : Optional[Any] = 'Encode <mask>sequence'
UpperCAmelCase : Dict = tokenizer.encode(lowercase_ )
UpperCAmelCase : str = encoded.index(lowercase_ )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = tokenizer.encode(lowercase_ )
UpperCAmelCase : List[str] = encoded.index(lowercase_ )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> int:
pass
def UpperCAmelCase_ ( self : str ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase : Optional[int] = 'A, <mask> AllenNLP sentence.'
UpperCAmelCase : List[Any] = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
UpperCAmelCase : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCAmelCase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowercase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : Any = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowercase_ )
self.assertEqual(post_processor_state['add_prefix_space'] , lowercase_ )
self.assertEqual(post_processor_state['trim_offsets'] , lowercase_ )
def UpperCAmelCase_ ( self : str ) -> str:
        # Verify that the returned character offsets correctly reflect the `add_prefix_space` and
        # `trim_offsets` arguments
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f"""{text_of_1_token} {text_of_1_token}"""
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : Union[str, Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : Any = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : Union[str, Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : List[str] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
UpperCAmelCase : Dict = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : Dict = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : Tuple = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
UpperCAmelCase : List[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
| 709 |
'''simple docstring'''
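# Project Euler problem 57: count how many of the first n expansions of the continued fraction
# for sqrt(2) have a numerator with more digits than the denominator. Each expansion follows from
# the previous one via numerator' = numerator + 2 * denominator and denominator' = numerator + denominator.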
def solution( n = 1_000 ):
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
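# Casts every tensor in a saved state dict to fp16, roughly halving the checkpoint's size on disk,
# then writes it back (overwriting the source file unless a separate save path is given).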
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
| 710 |
'''simple docstring'''
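# Project Euler problem 16: sum the decimal digits of 2**power (for 2**1000 the answer is 1366).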
def solution( power = 1_000 ):
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 695 | 0 |
'''simple docstring'''
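# Project Euler problem 30: find all numbers equal to the sum of the fifth powers of their digits.
# The table below precomputes digit**5 for every decimal digit so the per-number check is a lookup.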
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum( number ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution( ) -> int:
    return sum(
        number
        for number in range(1_000 , 1_000_000 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
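# BLIP-2 uses a composite configuration: separate vision, Q-Former and text sub-configs are nested
# under one top-level config, and each sub-config can also be extracted from a full "blip-2" dict.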
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
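# Each test class below instantiates a single UNet building block and compares a slice of its
# output against precomputed reference values to catch numerical regressions.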
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = DownBlockaD # noqa F405
UpperCAmelCase_ : Any = """down"""
def UpperCAmelCase_ ( self : str ) -> Any:
UpperCAmelCase : Any = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = ResnetDownsampleBlockaD # noqa F405
UpperCAmelCase_ : int = """down"""
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase : Optional[int] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = AttnDownBlockaD # noqa F405
UpperCAmelCase_ : str = """down"""
def UpperCAmelCase_ ( self : int ) -> Tuple:
UpperCAmelCase : List[Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = CrossAttnDownBlockaD # noqa F405
UpperCAmelCase_ : List[Any] = """down"""
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
UpperCAmelCase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Any = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
UpperCAmelCase : Tuple = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = SimpleCrossAttnDownBlockaD # noqa F405
UpperCAmelCase_ : List[Any] = """down"""
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
return super().get_dummy_input(include_encoder_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : int = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = SkipDownBlockaD # noqa F405
UpperCAmelCase_ : Optional[Any] = """down"""
@property
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
return super().get_dummy_input(include_skip_sample=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Optional[int] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = AttnSkipDownBlockaD # noqa F405
UpperCAmelCase_ : List[str] = """down"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
return super().get_dummy_input(include_skip_sample=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : int = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = DownEncoderBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """down"""
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : Tuple = {
'in_channels': 32,
'out_channels': 32,
}
UpperCAmelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : int = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = AttnDownEncoderBlockaD # noqa F405
UpperCAmelCase_ : Optional[int] = """down"""
@property
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
UpperCAmelCase : str = {
'in_channels': 32,
'out_channels': 32,
}
UpperCAmelCase : Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : Any = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = UNetMidBlockaD # noqa F405
UpperCAmelCase_ : Optional[int] = """mid"""
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = {
'in_channels': 32,
'temb_channels': 128,
}
UpperCAmelCase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Any ) -> str:
UpperCAmelCase : Optional[int] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = UNetMidBlockaDCrossAttn # noqa F405
UpperCAmelCase_ : Union[str, Any] = """mid"""
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
UpperCAmelCase : List[str] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : str ) -> str:
UpperCAmelCase : str = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
UpperCAmelCase_ : Any = """mid"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
return super().get_dummy_input(include_encoder_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
UpperCAmelCase : Any = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : Any = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = UpBlockaD # noqa F405
UpperCAmelCase_ : List[str] = """up"""
@property
def UpperCAmelCase_ ( self : Any ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
UpperCAmelCase : List[Any] = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ResnetUpsampleBlockaD # noqa F405
UpperCAmelCase_ : List[Any] = """up"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : int = CrossAttnUpBlockaD # noqa F405
UpperCAmelCase_ : Tuple = """up"""
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Tuple ) -> int:
UpperCAmelCase : int = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = SimpleCrossAttnUpBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ , include_encoder_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase : Dict = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase : str = 32
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
UpperCAmelCase : Any = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = AttnUpBlockaD # noqa F405
UpperCAmelCase_ : str = """up"""
@property
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
UpperCAmelCase : List[str] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = SkipUpBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = AttnSkipUpBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCAmelCase_ ( self : int ) -> str:
UpperCAmelCase : str = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = UpDecoderBlockaD # noqa F405
UpperCAmelCase_ : str = """up"""
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : Any = {'in_channels': 32, 'out_channels': 32}
UpperCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Union[str, Any] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(lowercase_ )
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = AttnUpDecoderBlockaD # noqa F405
UpperCAmelCase_ : Union[str, Any] = """up"""
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : List[Any] = {'in_channels': 32, 'out_channels': 32}
UpperCAmelCase : Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Tuple = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(lowercase_ )
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 0 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : Union[str, Any] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
UpperCAmelCase : Dict = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase : str = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
UpperCAmelCase : List[Any] = [sys.executable] + distributed_args
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
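# Nearest-neighbour search by linear scan: for every query vector, keep the dataset row with the
# smallest Euclidean distance; cosine similarity is provided as an alternative closeness metric.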
def euclidean( input_a , input_b ) -> float:
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ) -> float:
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
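# Base85 helpers built on the standard library: base64.b85encode/b85decode operate on bytes,
# so strings are round-tripped through UTF-8.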
import base64
def base85_encode( string ):
    return base64.b85encode(string.encode('utf-8' ) )
def base85_decode( encoded ):
    return base64.b85decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 695 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 715 |
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Euclidean distance between two equal-length vectors.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 | 0 |
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        # Each input line has the form: file;class_name;test_name;correct_line
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    # Root-mean-square speed: vrms = sqrt(3 * R * T / M), with M in kg/mol.
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
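    # Sanity check (hand-computed): with T = 300 K and M = 0.028 kg/mol (N2 in
    # SI units) the formula gives ~517 m/s; passing 28 as above treats the
    # molar mass as 28 kg/mol and yields ~16.3 m/s, so keep the units in kg/mol.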
| 717 |
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 695 | 0 |
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 718 |
def solution(max_perimeter: int = 10**9) -> int:
    # Sums the perimeters of almost-equilateral Heronian triangles
    # (sides a, a, a +/- 1 with integral area) not exceeding max_perimeter.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
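    # Cross-check of the recurrence against brute force over almost-equilateral
    # triangles (a, a, a +/- 1) with Heron's formula - a sketch for small bounds
    # only (hand-verified: both sides give 984 for max_perimeter = 1_000).
    import math

    def brute_force(max_perimeter: int) -> int:
        total = 0
        for a in range(2, max_perimeter // 3 + 2):
            for b in (a - 1, a + 1):
                p = 2 * a + b
                if p > max_perimeter:
                    continue
                s = p / 2
                area = math.sqrt(s * (s - a) * (s - a) * (s - b))
                if area > 0 and area == int(area):
                    total += p
        return total

    assert brute_force(1_000) == solution(1_000) == 984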
| 695 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    # All character-level n-grams of length `ngram_size` in `sentence`.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
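    # Hand-checked example of the sliding window above:
    assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]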
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config)
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 0 |
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
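    # Quick hand-check: the first six primes are 2, 3, 5, 7, 11 and 13,
    # so the 6th prime must be 13.
    assert solution(6) == 13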
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 0 |
import base64


# NOTE: the mangled `baseaa.baaencode` is read here as `base64.b85encode`
# (an assumption based on the digit-stripped names in this dump).
def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
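# Worked example of the feature bookkeeping above (hand-computed, not from the
# original file): with the defaults, cardinality == [0] and embedding_dimension
# == [min(50, (0 + 1) // 2)] == [0], so _number_of_features == 0 + 0 + 0 + 0 +
# 1 * 2 == 2 and feature_size == input_size * len(lags_sequence) + 2 == 1 * 7 + 2 == 9.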
| 695 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    # Sums the perimeters of almost-equilateral Heronian triangles
    # (sides a, a, a +/- 1 with integral area) not exceeding max_perimeter.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 700 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    # All character-level n-grams of length `ngram_size` in `sentence`.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
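    # Hand-checked example: with stress = 25 Pa and tangential_force = 100 N,
    # the missing quantity is the area, tangential_force / stress = 4.0 m^2.
    assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)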
| 701 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


# Second-order biquad designs after the RBJ Audio EQ Cookbook; function and
# coefficient names are restored from the coefficient patterns in the dump.
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
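if __name__ == "__main__":
    # Usage sketch (assumes IIRFilter.process(sample) from
    # audio_filters.iir_filter, as in the accompanying module): feed one
    # sample through a +6 dB peaking EQ centred at 1 kHz.
    eq = make_peak(frequency=1_000, samplerate=48_000, gain_db=6)
    print(eq.process(0.5))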
| 695 | 0 |
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # NOTE: the dump lost the attribute targets of the nine Node(...) calls;
    # the shape below is a reconstruction matching the node count.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
| 702 |
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key from the back of the queue.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
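    # Design note: refer() costs O(n) because deque.remove() scans the queue;
    # an OrderedDict with move_to_end() (or functools.lru_cache) would make
    # each access O(1) if that ever matters.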
| 695 | 0 |
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    # DATASETS_VERBOSITY overrides the default level when it names a valid choice.
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    # Dummy tqdm which doesn't do anything.
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        # Return an empty function for any attribute lookup.
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
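# Usage sketch (hedged: exercises only the helpers defined above):
#
#     logger = get_logger(__name__)
#     set_verbosity_info()
#     logger.info("visible at INFO level")
#     disable_progress_bar()  # tqdm(...) calls above now return EmptyTqdm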
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        # This corresponds to DialoGPT variants of models.
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
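# Usage sketch (the checkpoint name comes from the pretrained map above; the
# resulting ids are illustrative, not verified):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     input_ids = tokenizer("Hello world")["input_ids"]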
| 695 | 0 |
from ..utils import DummyObject, requires_backends


# NOTE: the dump reduced every dummy class below to the same placeholder name
# (originally one distinctly named dummy tokenizer per sentencepiece-backed
# model); only the broken *args/**kwargs signatures are repaired here.
class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

class A_(metaclass=DummyObject):
    _backends = ["sentencepiece"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51_865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1_536,
        encoder_ffn_dim=1_536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50_257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1_500,
        max_target_positions=448,
        pad_token_id=50_256,
        bos_token_id=50_256,
        eos_token_id=50_256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 695 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
UpperCAmelCase_ : List[Any] = StableDiffusionInpaintPipeline
UpperCAmelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase_ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase_ : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase_ : Optional[Any] = frozenset([] )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , )
UpperCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
UpperCAmelCase : Optional[Any] = CLIPTextModel(lowercase_ )
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : Tuple=0 ) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : Tuple = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((64, 64) )
UpperCAmelCase : Dict = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(lowercase_ ).startswith('mps' ):
UpperCAmelCase : int = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : List[str] ) -> str:
UpperCAmelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = StableDiffusionInpaintPipeline(**lowercase_ )
UpperCAmelCase : List[str] = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Dict = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : Union[str, Any] = sd_pipe(**lowercase_ ).images
UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
UpperCAmelCase : int = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase : Any = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Optional[Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase : List[Any] = torch.manual_seed(0 )
UpperCAmelCase : Any = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase_ ( self : Tuple ) -> int:
UpperCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
UpperCAmelCase : List[str] = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase : Tuple = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Optional[Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase : Dict = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase : List[str] = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
UpperCAmelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
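# A minimal inference sketch mirroring the slow tests above (illustrative, not
# part of the test suite); the checkpoint and image URLs are the ones the tests
# use, the rest is standard diffusers API.
def _example_inpaint_inference():
    import torch
    from diffusers import StableDiffusionInpaintPipeline
    from diffusers.utils import load_image
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        'stabilityai/stable-diffusion-2-inpainting', torch_dtype=torch.float16 )
    pipe = pipe.to('cuda')
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
        '/sd2-inpaint/init_image.png' )
    mask_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
    prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
    return pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]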
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
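# An illustrative direct call (an assumption; the CLI subcommand below normally
# drives this): writing a bf16 single-machine config to a temporary location.
def _example_write_basic_config():
    import tempfile
    from pathlib import Path
    target = Path(tempfile.mkdtemp()) / 'default_config.json'
    return write_basic_config(mixed_precision='bf16', save_location=str(target))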
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 0 |
'''simple docstring'''
lowercase__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
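# A downstream usage sketch (illustrative, not part of this module): consumers
# can probe the same availability helpers before importing heavy pipelines,
# mirroring the soft-dependency pattern used throughout the file above.
def _maybe_import_stable_diffusion():
    from diffusers.utils import is_torch_available, is_transformers_available
    if not (is_torch_available() and is_transformers_available()):
        return None
    from diffusers import StableDiffusionPipeline
    return StableDiffusionPipeline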
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
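# An illustrative migration sketch (an assumption, not part of the shim): new
# code should construct the replacement image processor directly, which avoids
# the deprecation warning raised above.
def _example_use_image_processor():
    from transformers import LayoutLMv2ImageProcessor
    return LayoutLMv2ImageProcessor(apply_ocr=False)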
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
def update_area_of_max_square(UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
UpperCAmelCase : Union[str, Any] = update_area_of_max_square(UpperCAmelCase_ , col + 1 )
UpperCAmelCase : Union[str, Any] = update_area_of_max_square(row + 1 , col + 1 )
UpperCAmelCase : List[Any] = update_area_of_max_square(row + 1 , UpperCAmelCase_ )
if mat[row][col]:
UpperCAmelCase : Any = 1 + min([right, diagonal, down] )
UpperCAmelCase : Dict = max(largest_square_area[0] , UpperCAmelCase_ )
return sub_problem_sol
else:
return 0
UpperCAmelCase : str = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
def update_area_of_max_square_using_dp_array(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
UpperCAmelCase : Union[str, Any] = update_area_of_max_square_using_dp_array(UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
UpperCAmelCase : List[str] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = update_area_of_max_square_using_dp_array(row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
if mat[row][col]:
UpperCAmelCase : str = 1 + min([right, diagonal, down] )
UpperCAmelCase : Optional[int] = max(largest_square_area[0] , UpperCAmelCase_ )
UpperCAmelCase : List[str] = sub_problem_sol
return sub_problem_sol
else:
return 0
UpperCAmelCase : Dict = [0]
UpperCAmelCase : Optional[Any] = [[-1] * cols for _ in range(UpperCAmelCase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , UpperCAmelCase_ )
return largest_square_area[0]
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = [[0] * (cols + 1) for _ in range(rows + 1 )]
UpperCAmelCase : Optional[int] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
UpperCAmelCase : Dict = dp_array[row][col + 1]
UpperCAmelCase : Optional[Any] = dp_array[row + 1][col + 1]
UpperCAmelCase : Tuple = dp_array[row + 1][col]
if mat[row][col] == 1:
UpperCAmelCase : Tuple = 1 + min(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : List[str] = max(dp_array[row][col] , UpperCAmelCase_ )
else:
UpperCAmelCase : List[Any] = 0
return largest_square_area
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : str = [0] * (cols + 1)
UpperCAmelCase : List[str] = [0] * (cols + 1)
UpperCAmelCase : Optional[int] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
UpperCAmelCase : List[Any] = current_row[col + 1]
UpperCAmelCase : List[Any] = next_row[col + 1]
UpperCAmelCase : Union[str, Any] = next_row[col]
if mat[row][col] == 1:
UpperCAmelCase : Any = 1 + min(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : List[str] = max(current_row[col] , UpperCAmelCase_ )
else:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Any = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
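# A self-contained reference sketch of the space-optimized strategy above
# (illustrative; the helper name is ours): a rolling pair of rows keeps the DP
# in O(cols) extra memory while scanning bottom-up. Each cell holds the side
# length of the largest all-ones square anchored at that position.
def _largest_square_side(mat):
    rows = len(mat)
    cols = len(mat[0]) if rows else 0
    next_row = [0] * (cols + 1)
    best = 0
    for row in range(rows - 1, -1, -1):
        current_row = [0] * (cols + 1)
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                current_row[col] = 1 + min(current_row[col + 1], next_row[col + 1], next_row[col])
                best = max(best, current_row[col])
        next_row = current_row
    return best

assert _largest_square_side([[1, 1], [1, 1]]) == 2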
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
lowercase__ = {
"hans": HansProcessor,
}
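# An illustrative construction sketch (the class and argument names follow the
# upstream HANS example and are assumptions here): building the evaluation
# dataset with a BERT tokenizer.
def _example_build_hans_dataset(data_dir='/path/to/hans'):
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    return HansDataset(
        data_dir=data_dir,
        tokenizer=tokenizer,
        task='hans',
        max_seq_length=128,
        evaluate=True,
    )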
| 695 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """trajectory_transformer"""
UpperCAmelCase_ : Any = ["""past_key_values"""]
UpperCAmelCase_ : Any = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[Any] , lowercase_ : List[Any]=100 , lowercase_ : Dict=5 , lowercase_ : Optional[int]=1 , lowercase_ : int=1 , lowercase_ : int=249 , lowercase_ : Any=6 , lowercase_ : Tuple=17 , lowercase_ : Dict=25 , lowercase_ : List[Any]=4 , lowercase_ : Tuple=4 , lowercase_ : Dict=128 , lowercase_ : List[str]=0.1 , lowercase_ : int=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : int=0.0006 , lowercase_ : int=512 , lowercase_ : int=0.02 , lowercase_ : Dict=1E-12 , lowercase_ : List[str]=1 , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=1 , lowercase_ : Any=50_256 , lowercase_ : Optional[Any]=50_256 , **lowercase_ : Any , ) -> int:
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : Optional[Any] = action_weight
UpperCAmelCase : Tuple = reward_weight
UpperCAmelCase : Union[str, Any] = value_weight
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Tuple = block_size
UpperCAmelCase : Optional[int] = action_dim
UpperCAmelCase : Tuple = observation_dim
UpperCAmelCase : Any = transition_dim
UpperCAmelCase : Dict = learning_rate
UpperCAmelCase : Any = n_layer
UpperCAmelCase : str = n_head
UpperCAmelCase : Optional[int] = n_embd
UpperCAmelCase : List[str] = embd_pdrop
UpperCAmelCase : Union[str, Any] = attn_pdrop
UpperCAmelCase : str = resid_pdrop
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Dict = layer_norm_eps
UpperCAmelCase : Optional[Any] = kaiming_initializer_range
UpperCAmelCase : Union[str, Any] = use_cache
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
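# An illustrative instantiation sketch (the class name follows the upstream
# `trajectory_transformer` model type and is an assumption here): the
# attribute_map above lets the generic names resolve to n_embd/n_head/n_layer.
def _example_trajectory_config():
    config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
    return config.num_hidden_layers, config.num_attention_heads, config.hidden_size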
| 709 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase , UpperCAmelCase : Any = 1, 1
UpperCAmelCase : Any = []
for i in range(1 , n + 1 ):
UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
UpperCAmelCase : Any = prev_numerator + prev_denominator
if len(str(UpperCAmelCase_ ) ) > len(str(UpperCAmelCase_ ) ):
result.append(UpperCAmelCase_ )
UpperCAmelCase : Dict = numerator
UpperCAmelCase : Dict = denominator
return len(UpperCAmelCase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
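# A worked note (illustrative): the recurrence above generates the convergents
# of sqrt(2): 3/2, 7/5, 17/12, 41/29, ... via p' = p + 2q, q' = p + q, and the
# solution counts, among the first 1000 expansions, those whose numerator has
# more digits than the denominator. The first such convergent is 1393/985.
def _first_longer_numerator():
    p, q = 1, 1
    while True:
        p, q = p + 2 * q, p + q
        if len(str(p)) > len(str(q)):
            return p, q

assert _first_longer_numerator() == (1393, 985)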
| 695 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = (EulerDiscreteScheduler,)
UpperCAmelCase_ : Dict = 10
def UpperCAmelCase_ ( self : Optional[int] , **lowercase_ : int ) -> str:
UpperCAmelCase : int = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowercase_ )
return config
def UpperCAmelCase_ ( self : Any ) -> str:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : List[str] = self.scheduler_classes[0]
UpperCAmelCase : Tuple = self.get_scheduler_config()
UpperCAmelCase : Any = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : Any = torch.manual_seed(0 )
UpperCAmelCase : Dict = self.dummy_model()
UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : Tuple = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : Optional[int] = scheduler.scale_model_input(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = model(lowercase_ , lowercase_ )
UpperCAmelCase : Union[str, Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ )
UpperCAmelCase : List[str] = output.prev_sample
UpperCAmelCase : Tuple = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCAmelCase : List[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = self.dummy_model()
UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase : List[str] = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase : List[str] = scheduler.scale_model_input(lowercase_ , lowercase_ )
UpperCAmelCase : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase : str = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ )
UpperCAmelCase : Dict = output.prev_sample
UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self.scheduler_classes[0]
UpperCAmelCase : List[str] = self.get_scheduler_config()
UpperCAmelCase : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_ )
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = self.dummy_model()
UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase : Optional[int] = sample.to(lowercase_ )
for t in scheduler.timesteps:
UpperCAmelCase : int = scheduler.scale_model_input(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[Any] = model(lowercase_ , lowercase_ )
UpperCAmelCase : Any = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ )
UpperCAmelCase : Tuple = output.prev_sample
UpperCAmelCase : Tuple = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase : str = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase : Optional[int] = self.get_scheduler_config()
UpperCAmelCase : Optional[int] = scheduler_class(**lowercase_ , use_karras_sigmas=lowercase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Dict = self.dummy_model()
UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase : Optional[int] = sample.to(lowercase_ )
for t in scheduler.timesteps:
UpperCAmelCase : int = scheduler.scale_model_input(lowercase_ , lowercase_ )
UpperCAmelCase : Union[str, Any] = model(lowercase_ , lowercase_ )
UpperCAmelCase : List[Any] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ )
UpperCAmelCase : Dict = output.prev_sample
UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase : List[str] = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
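# An illustrative standalone sketch mirroring the Karras-sigma test above:
# constructing the scheduler directly and inspecting its inference timesteps.
def _example_euler_scheduler():
    from diffusers import EulerDiscreteScheduler
    scheduler = EulerDiscreteScheduler(
        num_train_timesteps=1_100,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule='linear',
        use_karras_sigmas=True,
    )
    scheduler.set_timesteps(10)
    return scheduler.timesteps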
| 710 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase : List[Any] = 2**power
UpperCAmelCase : List[Any] = 0
while n:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = r + n % 10, n // 10
return r
if __name__ == "__main__":
    print(solution(int(input().strip())))
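# A one-line cross-check of the digit-sum loop above (illustrative, not part
# of the original solution): summing over the decimal string gives the same result.
def _digit_sum_via_string(power=1_000):
    return sum(int(digit) for digit in str(2**power))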
| 695 | 0 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowercase__ = logging.getLogger(__name__)
lowercase__ = 50 # max width of layer names
lowercase__ = 70 # max width of quantizer names
def UpperCamelCase( UpperCAmelCase_ ) -> Optional[Any]:
UpperCAmelCase : List[Any] = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=UpperCAmelCase_ , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=UpperCAmelCase_ , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=UpperCAmelCase_ , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=UpperCAmelCase_ , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=UpperCAmelCase_ , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=UpperCAmelCase_ , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def UpperCamelCase( UpperCAmelCase_ ) -> Dict:
if args.calibrator == "max":
UpperCAmelCase : Tuple = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
UpperCAmelCase : List[str] = 'histogram'
elif args.calibrator == "mse":
UpperCAmelCase : List[str] = 'histogram'
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
UpperCAmelCase : Optional[Any] = QuantDescriptor(num_bits=args.aprec , calib_method=UpperCAmelCase_ )
UpperCAmelCase : List[str] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(UpperCAmelCase_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=False ) -> Optional[Any]:
logger.info('Configuring Model for Quantization' )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(UpperCAmelCase_ , ['embeddings'] , which='weight' , _disabled=UpperCAmelCase_ )
if args.quant_disable:
set_quantizer_by_name(UpperCAmelCase_ , [''] , _disabled=UpperCAmelCase_ )
if args.quant_disable_keyword:
set_quantizer_by_name(UpperCAmelCase_ , args.quant_disable_keyword , _disabled=UpperCAmelCase_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(UpperCAmelCase_ , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=UpperCAmelCase_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(UpperCAmelCase_ , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=UpperCAmelCase_ )
if args.recalibrate_weights:
recalibrate_weights(UpperCAmelCase_ )
if args.fuse_qkv:
fuse_qkv(UpperCAmelCase_ , UpperCAmelCase_ )
if args.clip_gelu:
clip_gelu(UpperCAmelCase_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ ) -> List[str]:
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ) -> Optional[Any]:
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ) -> Union[str, Any]:
def fusea(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(UpperCAmelCase_ , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
UpperCAmelCase : Optional[Any] = qq._amax.detach().item()
UpperCAmelCase : str = qk._amax.detach().item()
UpperCAmelCase : List[str] = qv._amax.detach().item()
UpperCAmelCase : Union[str, Any] = max(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
qq._amax.fill_(UpperCAmelCase_ )
qk._amax.fill_(UpperCAmelCase_ )
qv._amax.fill_(UpperCAmelCase_ )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ) -> Dict:
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
UpperCAmelCase : Dict = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=UpperCAmelCase_ )
UpperCAmelCase : Tuple = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def UpperCamelCase( UpperCAmelCase_ ) -> List[Any]:
for name, mod in model.named_modules():
if hasattr(UpperCAmelCase_ , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase : Optional[int] = mod.weight.shape[0]
UpperCAmelCase : Tuple = mod._weight_quantizer._amax.detach()
UpperCAmelCase : Any = torch.ones(UpperCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def UpperCamelCase( UpperCAmelCase_ ) -> List[str]:
for name, mod in model.named_modules():
if hasattr(UpperCAmelCase_ , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase : List[str] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase : List[str] = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase : List[str] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=UpperCAmelCase_ , keepdims=UpperCAmelCase_ ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
UpperCAmelCase : Any = amax
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=25 , UpperCAmelCase_=1_80 , UpperCAmelCase_=None ) -> str:
if ignore is None:
UpperCAmelCase : Union[str, Any] = []
elif not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = [ignore]
UpperCAmelCase : Tuple = 0
for name, mod in model.named_modules():
if not hasattr(UpperCAmelCase_ , 'weight' ):
continue
UpperCAmelCase : Dict = max(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
for name, mod in model.named_modules():
UpperCAmelCase : Tuple = getattr(UpperCAmelCase_ , '_input_quantizer' , UpperCAmelCase_ )
UpperCAmelCase : str = getattr(UpperCAmelCase_ , '_weight_quantizer' , UpperCAmelCase_ )
if not hasattr(UpperCAmelCase_ , 'weight' ):
continue
if type(UpperCAmelCase_ ) in ignore:
continue
if [True for s in ignore if type(UpperCAmelCase_ ) is str and s in name]:
continue
UpperCAmelCase : List[str] = F"""Act:{input_q.extra_repr()}"""
UpperCAmelCase : Optional[int] = F"""Wgt:{weight_q.extra_repr()}"""
UpperCAmelCase : Union[str, Any] = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(UpperCAmelCase_ ) <= line_width:
logger.info(UpperCAmelCase_ )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def UpperCamelCase( UpperCAmelCase_ ) -> List[str]:
UpperCAmelCase : Dict = 0
for name, mod in model.named_modules():
if isinstance(UpperCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> Optional[int]:
UpperCAmelCase : Dict = getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if quantizer_mod is not None:
assert hasattr(UpperCAmelCase_ , UpperCAmelCase_ )
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="both" , **UpperCAmelCase_ ) -> Union[str, Any]:
UpperCAmelCase : Tuple = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(UpperCAmelCase_ , UpperCAmelCase_ , '_input_quantizer' , UpperCAmelCase_ , UpperCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(UpperCAmelCase_ , UpperCAmelCase_ , '_weight_quantizer' , UpperCAmelCase_ , UpperCAmelCase_ )
logger.info(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ) -> Tuple:
for name, mod in model.named_modules():
if hasattr(UpperCAmelCase_ , '_input_quantizer' ) or hasattr(UpperCAmelCase_ , '_weight_quantizer' ):
for n in names:
if re.search(UpperCAmelCase_ , UpperCAmelCase_ ):
set_quantizers(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
logger.info(UpperCAmelCase_ )
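# A self-contained sketch of the descriptors these helpers install
# (illustrative assumptions throughout): an 8-bit max-calibrated input
# descriptor and a per-tensor weight descriptor applied to a single quantized
# linear layer, mirroring the defaults set above.
def _example_quant_linear():
    import torch
    import pytorch_quantization.nn as quant_nn
    from pytorch_quantization.tensor_quant import QuantDescriptor
    quant_nn.QuantLinear.set_default_quant_desc_input(
        QuantDescriptor(num_bits=8, calib_method='max') )
    quant_nn.QuantLinear.set_default_quant_desc_weight(
        QuantDescriptor(num_bits=8, axis=None) )
    layer = quant_nn.QuantLinear(16, 16)
    return layer(torch.randn(2, 16))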
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
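# Illustrative sketch (not part of the original file): composing the combined
# config from its sub-configs via the classmethod defined above, assuming the
# public `transformers` class names for this model family:
#
#     vision = Blip2VisionConfig()
#     qformer = Blip2QFormerConfig()
#     text = OPTConfig()
#     config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text)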
| 695 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
UpperCAmelCase : Optional[int] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('RGB' )
return image
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = dct.pop(UpperCAmelCase_ )
UpperCAmelCase : List[str] = val
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase : Optional[Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
UpperCAmelCase : str = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
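# the original ViT attention learns only q and v biases (k has none), so a zero
# tensor is inserted between them to match the HF model's fused qkv projection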
UpperCAmelCase : str = torch.cat((q_bias, torch.zeros_like(UpperCAmelCase_ , requires_grad=UpperCAmelCase_ ), v_bias) )
UpperCAmelCase : Tuple = qkv_bias
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = 3_64 if 'coco' in model_name else 2_24
UpperCAmelCase : Optional[int] = InstructBlipVisionConfig(image_size=UpperCAmelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
UpperCAmelCase : List[Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase : List[str] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
UpperCAmelCase : List[str] = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
UpperCAmelCase : int = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
UpperCAmelCase : Tuple = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
UpperCAmelCase : List[str] = InstructBlipConfig(vision_config=UpperCAmelCase_ , text_config=UpperCAmelCase_ , qformer_config=UpperCAmelCase_ )
return config, image_size
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=False ):
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
UpperCAmelCase : Optional[Any] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
UpperCAmelCase : Any = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
UpperCAmelCase : Dict = get_blipa_config(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = InstructBlipForConditionalGeneration(UpperCAmelCase_ ).eval()
UpperCAmelCase : Optional[Any] = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
UpperCAmelCase : Tuple = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCAmelCase : Tuple = 'cuda:1' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase : Dict = 'cuda:2' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase : Tuple = load_model_and_preprocess(
name=UpperCAmelCase_ , model_type=UpperCAmelCase_ , is_eval=UpperCAmelCase_ , device=UpperCAmelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCAmelCase : Any = original_model.state_dict()
UpperCAmelCase : Tuple = create_rename_keys(UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase : Optional[Any] = state_dict.pop(UpperCAmelCase_ )
if key.startswith('Qformer.bert' ):
UpperCAmelCase : Dict = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCAmelCase : Optional[Any] = key.replace('self' , 'attention' )
if "llm_proj" in key:
UpperCAmelCase : Dict = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCAmelCase : Tuple = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
UpperCAmelCase : str = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
UpperCAmelCase : List[Any] = key.replace('t5' , 'language' )
UpperCAmelCase : int = val
# read in qv biases
read_in_q_v_bias(UpperCAmelCase_ , UpperCAmelCase_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = load_demo_image()
UpperCAmelCase : Optional[Any] = 'What is unusual about this image?'
# create processor
UpperCAmelCase : Optional[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = InstructBlipProcessor(
image_processor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ , )
UpperCAmelCase : List[Any] = processor(images=UpperCAmelCase_ , text=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# make sure processor creates exact same pixel values
UpperCAmelCase : Any = vis_processors['eval'](UpperCAmelCase_ ).unsqueeze(0 ).to(UpperCAmelCase_ )
UpperCAmelCase : Tuple = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCAmelCase_ )
original_model.to(UpperCAmelCase_ )
hf_model.to(UpperCAmelCase_ )
with torch.no_grad():
if "vicuna" in model_name:
UpperCAmelCase : Dict = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
UpperCAmelCase : List[Any] = hf_model(**UpperCAmelCase_ ).logits
else:
UpperCAmelCase : int = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
UpperCAmelCase : List[Any] = tokenizer('\n' , return_tensors='pt' ).input_ids.to(UpperCAmelCase_ )
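# -100 is the ignore_index used by the cross-entropy loss, so padded label
# positions are excluded from the loss computation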
UpperCAmelCase : Tuple = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
UpperCAmelCase : Optional[Any] = hf_model(**UpperCAmelCase_ , labels=UpperCAmelCase_ ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
UpperCAmelCase : int = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , UpperCAmelCase_ , atol=UpperCAmelCase_ )
print('Looks ok!' )
print('Generating with original model...' )
UpperCAmelCase : Tuple = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
UpperCAmelCase : Any = hf_model.generate(
**UpperCAmelCase_ , do_sample=UpperCAmelCase_ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
UpperCAmelCase : Any = 2
print('Original generation:' , UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = processor.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCAmelCase : List[Any] = [text.strip() for text in output_text]
print('HF generation:' , UpperCAmelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
lowercase__ = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
lowercase__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
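# Hedged sketch (an assumption, not part of this test): `calculate_bleu` is
# imported from the local examples `utils` module; a minimal stand-in built on
# sacrebleu would look roughly like:
#
#     import sacrebleu
#     def calculate_bleu(output_lns, refs_lns):
#         return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}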
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evaluates a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = StableDiffusionPanoramaPipeline
UpperCAmelCase_ : Optional[int] = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCAmelCase : Any = DDIMScheduler()
torch.manual_seed(0 )
UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCAmelCase : Union[str, Any] = CLIPTextModel(lowercase_ )
UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase : Dict = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]=0 ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(lowercase_ )
UpperCAmelCase : Any = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : List[Any] ) -> int:
UpperCAmelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : str = StableDiffusionPanoramaPipeline(**lowercase_ )
UpperCAmelCase : Optional[Any] = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : str = sd_pipe(**lowercase_ ).images
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Optional[int] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : Any = StableDiffusionPanoramaPipeline(**lowercase_ )
UpperCAmelCase : Tuple = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : int = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : str = 'french fries'
UpperCAmelCase : Optional[int] = sd_pipe(**lowercase_ , negative_prompt=lowercase_ )
UpperCAmelCase : Dict = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Optional[int] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : List[Any] ) -> str:
UpperCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline(**lowercase_ )
UpperCAmelCase : List[Any] = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : Dict = sd_pipe(**lowercase_ , view_batch_size=2 )
UpperCAmelCase : Union[str, Any] = output.images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : str = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
UpperCAmelCase : Any = StableDiffusionPanoramaPipeline(**lowercase_ )
UpperCAmelCase : Tuple = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : Any = sd_pipe(**lowercase_ ).images
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[Any] = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Tuple = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=lowercase_ )
UpperCAmelCase : int = StableDiffusionPanoramaPipeline(**lowercase_ )
UpperCAmelCase : Optional[int] = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Dict = self.get_dummy_inputs(lowercase_ )
UpperCAmelCase : Any = sd_pipe(**lowercase_ ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Any = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Any=0 ) -> Tuple:
UpperCAmelCase : Dict = torch.manual_seed(lowercase_ )
UpperCAmelCase : Union[str, Any] = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Dict = 'stabilityai/stable-diffusion-2-base'
UpperCAmelCase : Dict = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
UpperCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Dict = self.get_inputs()
UpperCAmelCase : Optional[int] = pipe(**lowercase_ ).images
UpperCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
UpperCAmelCase : Dict = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=lowercase_ )
UpperCAmelCase : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Dict = self.get_inputs()
UpperCAmelCase : str = pipe(**lowercase_ ).images
UpperCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
UpperCAmelCase : List[str] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : Any = 0
def callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : torch.FloatTensor ) -> None:
UpperCAmelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase : Dict = latents[0, -3:, -3:, -1]
UpperCAmelCase : int = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCAmelCase : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
UpperCAmelCase : Dict = latents[0, -3:, -3:, -1]
UpperCAmelCase : List[Any] = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCAmelCase : List[Any] = False
UpperCAmelCase : int = 'stabilityai/stable-diffusion-2-base'
UpperCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
UpperCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
UpperCAmelCase : Optional[int] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Tuple = self.get_inputs()
pipe(**lowercase_ , callback=lowercase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : List[str] = 'stabilityai/stable-diffusion-2-base'
UpperCAmelCase : Dict = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
UpperCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ )
UpperCAmelCase : Dict = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
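# sequential CPU offload keeps submodules on the CPU and moves each one to the
# GPU only for its forward pass, trading speed for a much lower peak VRAM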
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : Optional[int] = self.get_inputs()
UpperCAmelCase : List[Any] = pipe(**lowercase_ )
UpperCAmelCase : Any = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
lowercase__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
lowercase__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
lowercase__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Any ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , lowercase_ : Any=False , lowercase_ : Union[str, Any]=False , lowercase_ : str=False , ) -> int:
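# order of operations mirrors the docstring: regexes are stripped first, then
# case folding, punctuation removal, and digit removal, before the elementwise
# comparison that yields the match rate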
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCAmelCase : Optional[int] = np.array([re.sub(lowercase_ , '' , lowercase_ ) for x in predictions] )
UpperCAmelCase : Tuple = np.array([re.sub(lowercase_ , '' , lowercase_ ) for x in references] )
else:
UpperCAmelCase : str = np.asarray(lowercase_ )
UpperCAmelCase : List[str] = np.asarray(lowercase_ )
if ignore_case:
UpperCAmelCase : Dict = np.char.lower(lowercase_ )
UpperCAmelCase : str = np.char.lower(lowercase_ )
if ignore_punctuation:
UpperCAmelCase : Tuple = string.punctuation.maketrans('' , '' , string.punctuation )
UpperCAmelCase : Union[str, Any] = np.char.translate(lowercase_ , table=lowercase_ )
UpperCAmelCase : Optional[int] = np.char.translate(lowercase_ , table=lowercase_ )
if ignore_numbers:
UpperCAmelCase : Union[str, Any] = string.digits.maketrans('' , '' , string.digits )
UpperCAmelCase : Tuple = np.char.translate(lowercase_ , table=lowercase_ )
UpperCAmelCase : Any = np.char.translate(lowercase_ , table=lowercase_ )
UpperCAmelCase : Union[str, Any] = predictions == references
return {"exact_match": np.mean(lowercase_ ) * 100}
| 714 |
'''simple docstring'''
import baseaa
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baaencode(string.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baadecode(UpperCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
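# Chudnovsky series: 1/pi = 12 * sum_{k>=0} (-1)**k * (6k)! * (13591409 + 545140134*k)
#     / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# The constant 426880 * sqrt(10005) used below equals 640320**(3/2) / 12.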
def UpperCamelCase( UpperCAmelCase_ ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
UpperCAmelCase : Optional[int] = precision
UpperCAmelCase : Optional[int] = ceil(precision / 14 )
UpperCAmelCase : List[str] = 42_68_80 * Decimal(1_00_05 ).sqrt()
UpperCAmelCase : Any = 1
UpperCAmelCase : str = 13_59_14_09
UpperCAmelCase : List[str] = Decimal(UpperCAmelCase_ )
for k in range(1 , UpperCAmelCase_ ):
UpperCAmelCase : int = factorial(6 * k ) // (factorial(3 * k ) * factorial(UpperCAmelCase_ ) ** 3)
linear_term += 5_45_14_01_34
exponential_term *= -26_25_37_41_26_40_76_80_00
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
lowercase__ = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if dataset.ndim != value_array.ndim:
UpperCAmelCase : str = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(UpperCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(UpperCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase : List[str] = (
'Input data have different datatypes... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(UpperCAmelCase_ )
UpperCAmelCase : str = []
for value in value_array:
UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
UpperCAmelCase : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
if dist > temp_dist:
UpperCAmelCase : List[str] = temp_dist
UpperCAmelCase : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
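# Worked example: for [1, 2] and [2, 4], dot = 1*2 + 2*4 = 10 and
# norm([1, 2]) * norm([2, 4]) = sqrt(5) * 2*sqrt(5) = 10, so the similarity is
# 1.0 (the vectors are parallel).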
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
'''simple docstring'''
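# Gnome sort: scan forward while adjacent pairs are ordered; on an inversion,
# swap the pair and step back one position. O(n**2) worst case, O(n) when the
# input is already sorted.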
def UpperCamelCase( UpperCAmelCase_ ):
if len(UpperCAmelCase_ ) <= 1:
return lst
UpperCAmelCase : List[str] = 1
while i < len(UpperCAmelCase_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCAmelCase : Union[str, Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCAmelCase : List[str] = 1
return lst
if __name__ == "__main__":
lowercase__ = input("Enter numbers separated by a comma:\n").strip()
lowercase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
lowercase__ = list[tuple[int, int]]
lowercase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : Node | None ) -> Optional[int]:
UpperCAmelCase : Optional[int] = pos_x
UpperCAmelCase : Tuple = pos_y
UpperCAmelCase : List[str] = (pos_y, pos_x)
UpperCAmelCase : List[Any] = goal_x
UpperCAmelCase : List[str] = goal_y
UpperCAmelCase : List[Any] = parent
class A_ :
'''simple docstring'''
def __init__( self : Dict , lowercase_ : tuple[int, int] , lowercase_ : tuple[int, int] ) -> Tuple:
UpperCAmelCase : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , lowercase_ )
UpperCAmelCase : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowercase_ )
UpperCAmelCase : Union[str, Any] = [self.start]
UpperCAmelCase : str = False
def UpperCAmelCase_ ( self : Any ) -> Path | None:
while self.node_queue:
UpperCAmelCase : List[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCAmelCase : List[Any] = True
return self.retrace_path(lowercase_ )
UpperCAmelCase : Any = self.get_successors(lowercase_ )
for node in successors:
self.node_queue.append(lowercase_ )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Node ) -> list[Node]:
UpperCAmelCase : List[str] = []
for action in delta:
UpperCAmelCase : str = parent.pos_x + action[1]
UpperCAmelCase : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , lowercase_ ) )
return successors
def UpperCAmelCase_ ( self : str , lowercase_ : Node | None ) -> Path:
UpperCAmelCase : str = node
UpperCAmelCase : Dict = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase : List[Any] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
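# Runs one BFS from the start and one from the goal in lockstep; meeting in the
# middle shrinks the explored frontier from O(b**d) to roughly O(b**(d/2)) for
# branching factor b and solution depth d.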
def __init__( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> List[str]:
UpperCAmelCase : str = BreadthFirstSearch(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[int] = BreadthFirstSearch(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = False
def UpperCAmelCase_ ( self : str ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCAmelCase : Any = self.fwd_bfs.node_queue.pop(0 )
UpperCAmelCase : Any = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCAmelCase : Tuple = True
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = current_bwd_node
UpperCAmelCase : Tuple = current_fwd_node
UpperCAmelCase : Dict = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowercase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowercase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowercase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase_ ( self : int , lowercase_ : Node , lowercase_ : Node ) -> Path:
UpperCAmelCase : Union[str, Any] = self.fwd_bfs.retrace_path(lowercase_ )
UpperCAmelCase : Any = self.bwd_bfs.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowercase__ = (0, 0)
lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase__ = time.time()
lowercase__ = BreadthFirstSearch(init, goal)
lowercase__ = bfs.search()
lowercase__ = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
lowercase__ = time.time()
lowercase__ = BidirectionalBreadthFirstSearch(init, goal)
lowercase__ = bd_bfs.search()
lowercase__ = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCAmelCase : List[str] = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(UpperCAmelCase_ ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 0 |
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowercase__ = logging.get_logger(__name__)
@dataclass
class A_ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : List[Any]=False , lowercase_ : int=False , lowercase_ : str=6.0 , lowercase_ : int=None , lowercase_ : List[str]=False , lowercase_ : Dict=False , lowercase_ : str=None , lowercase_ : Any="fp4" , lowercase_ : Optional[Any]=False , **lowercase_ : Union[str, Any] , ) -> int:
UpperCAmelCase : Optional[int] = load_in_abit
UpperCAmelCase : Union[str, Any] = load_in_abit
UpperCAmelCase : Tuple = llm_inta_threshold
UpperCAmelCase : int = llm_inta_skip_modules
UpperCAmelCase : str = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase : str = llm_inta_has_fpaa_weight
UpperCAmelCase : List[Any] = bnb_abit_quant_type
UpperCAmelCase : Optional[int] = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase : Any = torch.floataa
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Optional[Any] = getattr(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , torch.dtype ):
UpperCAmelCase : List[Any] = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def UpperCAmelCase_ ( self : int ) -> Tuple:
if not isinstance(self.llm_inta_threshold , lowercase_ ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , lowercase_ ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , lowercase_ ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , lowercase_ ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , lowercase_ ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , lowercase_ ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def UpperCAmelCase_ ( self : int ) -> Any:
        return self.load_in_8bit or self.load_in_4bit
def UpperCAmelCase_ ( self : int ) -> List[str]:
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
UpperCAmelCase : int = cls(**lowercase_ )
UpperCAmelCase : Dict = []
for key, value in kwargs.items():
if hasattr(lowercase_ , lowercase_ ):
setattr(lowercase_ , lowercase_ , lowercase_ )
to_remove.append(lowercase_ )
for key in to_remove:
kwargs.pop(lowercase_ , lowercase_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def UpperCAmelCase_ ( self : Any , lowercase_ : Union[str, os.PathLike] ) -> List[str]:
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
UpperCAmelCase : Tuple = self.to_dict()
UpperCAmelCase : int = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + '\n'
writer.write(lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Dict[str, Any]:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[Any] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self : Union[str, Any] ) -> Optional[Any]:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def UpperCAmelCase_ ( self : int , lowercase_ : bool = True ) -> str:
if use_diff is True:
UpperCAmelCase : str = self.to_diff_dict()
else:
UpperCAmelCase : List[str] = self.to_dict()
return json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + "\n"
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
UpperCAmelCase : List[str] = self.to_dict()
# get the default config dict
UpperCAmelCase : Any = BitsAndBytesConfig().to_dict()
UpperCAmelCase : Optional[Any] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase : Any = value
return serializable_config_dict
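# A minimal usage sketch, not part of the original file: how a quantization
# config like the one above is typically passed to `from_pretrained` upstream
# (where this class is `BitsAndBytesConfig`). The model id is a placeholder.
#
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_compute_dtype=torch.bfloat16,
# )
# model = AutoModelForCausalLM.from_pretrained("some/model-id", quantization_config=bnb_config)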
| 718 |
'''simple docstring'''
def solution(max_perimeter=10**9):
    prev_value = 1
    value = 2
    i = 0
    perimeter = 0
    perimeters_sum = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
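# Note: this appears to be Project Euler problem 94 (almost equilateral
# Heronian triangles): the prev_value/value recurrence walks the Pell-like
# solutions, and each one contributes a perimeter of 2 * value +/- 2,
# alternating between the (a, a, a + 1) and (a, a, a - 1) families.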
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We only need to traverse half of the string, because the i'th character
    # from the start must match the i'th character from the end (index n-i-1).
    # e.g. for [0,1,2,3,4,5], index 4 is checked together with index 1.
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:  # base case must be <= 1; <= 2 would wrongly accept e.g. "ab"
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('google/mt5-small' )
UpperCAmelCase : Optional[Any] = tokenizer('Hello there' , return_tensors='tf' ).input_ids
UpperCAmelCase : List[Any] = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
UpperCAmelCase : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
UpperCAmelCase : Dict = -tf.math.reduce_mean(lowercase_ ).numpy()
UpperCAmelCase : Any = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
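# Worked example: with the default scale_factor of 8, a 768 x 768 request maps
# to the latent-aligned size 96 x 96, since 768 // 8**2 == 12 and 12 * 8 == 96:
# downscale_height_and_width(768, 768) == (96, 96)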
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : UNetaDConditionModel , lowercase_ : DDPMScheduler , lowercase_ : VQModel , ) -> str:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> int:
if latents is None:
UpperCAmelCase : Union[str, Any] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase : Optional[int] = latents.to(lowercase_ )
UpperCAmelCase : List[Any] = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : int=0 ) -> Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : List[Any]=0 ) -> Optional[int]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase : Optional[Any] = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self : Union[str, Any] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ : int = 512 , lowercase_ : int = 512 , lowercase_ : int = 100 , lowercase_ : float = 4.0 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> int:
UpperCAmelCase : List[str] = self._execution_device
UpperCAmelCase : int = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Dict = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Tuple = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase : str = self.scheduler.timesteps
UpperCAmelCase : Tuple = self.unet.config.in_channels
UpperCAmelCase : Tuple = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = {'image_embeds': image_embeds}
UpperCAmelCase : str = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Any = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase : int = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase : Optional[Any] = image * 0.5 + 0.5
UpperCAmelCase : str = image.clamp(0 , 1 )
UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
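# A minimal usage sketch, not part of the original file (upstream this class
# is `AutoformerConfig`); the hyperparameter values below are illustrative:
#
# config = A_(
#     prediction_length=24,
#     context_length=48,
#     num_time_features=2,
#     num_static_categorical_features=1,
#     cardinality=[366],
#     embedding_dimension=[4],
# )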
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( nums ):
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
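# Worked example (maximum sum over non-adjacent elements): for [2, 7, 9, 3, 1]
# the best selection is 2 + 9 + 1 = 12, so UpperCamelCase([2, 7, 9, 3, 1])
# returns 12.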
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
def UpperCamelCase( sentence , ngram_size ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
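# Worked example (character n-grams of a string):
# UpperCamelCase("hello", 2) -> ['he', 'el', 'll', 'lo']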
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
def solution(length=50):
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
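# Note: this appears to be Project Euler problem 114 - counting the ways to
# fill a row of `length` units with red blocks of length >= 3, any two of
# which are separated by at least one black square.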
if __name__ == "__main__":
print(f'''{solution() = }''')
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
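# A minimal usage sketch, not part of the original file. Upstream these seven
# factories are named make_lowpass, make_highpass, make_bandpass, make_allpass,
# make_peak, make_lowshelf and make_highshelf; here they all share one name.
# It assumes IIRFilter (imported above) exposes a per-sample `process` method,
# and `samples` is a placeholder iterable of floats:
#
# filt = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at a 48 kHz sample rate
# filtered = [filt.process(sample) for sample in samples]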
| 695 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ = 16
lowercase__ = 32
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ = 16 , UpperCAmelCase_ = "bert-base-cased" ):
UpperCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Tuple = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase : Optional[Any] = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCAmelCase : Any = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
# Initialize accelerator
UpperCAmelCase : List[str] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase : Any = config['lr']
UpperCAmelCase : Tuple = int(config['num_epochs'] )
UpperCAmelCase : List[Any] = int(config['seed'] )
UpperCAmelCase : Tuple = int(config['batch_size'] )
UpperCAmelCase : str = args.model_name_or_path
set_seed(UpperCAmelCase_ )
    train_dataloader, eval_dataloader = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
# Instantiate optimizer
UpperCAmelCase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase : List[str] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCAmelCase : str = 1
UpperCAmelCase : List[str] = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase : Optional[int] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , )
else:
UpperCAmelCase : List[Any] = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase : Optional[Any] = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase : Optional[int] = 0
# Now we train the model
UpperCAmelCase : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
UpperCAmelCase : str = 0
UpperCAmelCase : int = {}
for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = model(**UpperCAmelCase_ )
UpperCAmelCase : List[Any] = outputs.loss
UpperCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase : str = 0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase : Any = model(**UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase_ ) - 1:
UpperCAmelCase : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
UpperCAmelCase : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase_ )
UpperCAmelCase : Any = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase : Optional[int] = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase_ , default=3 , help='Number of train epochs.' , )
UpperCAmelCase : str = parser.parse_args()
UpperCAmelCase : str = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
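# Example invocation, not part of the original file (the script and DeepSpeed
# config file names are placeholders); typically launched through `accelerate`
# so the DeepSpeed plugin configuration is picked up:
#
# accelerate launch --config_file ds_zero2.yaml peak_performance_check.py \
#     --model_name_or_path bert-base-cased \
#     --output_dir ./results \
#     --num_epochs 3 \
#     --performance_lower_bound 0.80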
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class LRUCache( Generic[T] ):
'''simple docstring'''
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        for k in self.dq_store:
            print(k)
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
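    # Implementation note: `deque.remove` is O(n), so each refer() call is
    # linear in the cache size; a production LRU cache would more typically be
    # built on collections.OrderedDict or functools.lru_cache.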
| 695 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase__ = logging.get_logger("transformers.models.encodec")
lowercase__ = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
lowercase__ = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
lowercase__ = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
lowercase__ = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
lowercase__ = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase__ = []
lowercase__ = []
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
for attribute in key.split('.' ):
UpperCAmelCase : str = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
UpperCAmelCase : Optional[Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
UpperCAmelCase : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : List[str] = value
elif weight_type == "weight_v":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "bias":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "running_mean":
UpperCAmelCase : Dict = value
elif weight_type == "running_var":
UpperCAmelCase : List[str] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase : List[Any] = value
elif weight_type == "weight_ih_l0":
UpperCAmelCase : Any = value
elif weight_type == "weight_hh_l0":
UpperCAmelCase : Optional[int] = value
elif weight_type == "bias_ih_l0":
UpperCAmelCase : List[str] = value
elif weight_type == "bias_hh_l0":
UpperCAmelCase : List[Any] = value
elif weight_type == "weight_ih_l1":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_hh_l1":
UpperCAmelCase : int = value
elif weight_type == "bias_ih_l1":
UpperCAmelCase : Tuple = value
elif weight_type == "bias_hh_l1":
UpperCAmelCase : str = value
else:
UpperCAmelCase : Dict = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = []
if model_name == "encodec_24khz" or "encodec_32khz":
UpperCAmelCase : str = MAPPING_24K
elif model_name == "encodec_48khz":
UpperCAmelCase : List[Any] = MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info(F"""{name} was ignored""" )
continue
UpperCAmelCase : int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
                prefix, suffix = key.split('.*.' )
if prefix in name and suffix in name:
UpperCAmelCase : List[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
UpperCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
UpperCAmelCase : Optional[int] = name.split(UpperCAmelCase_ )[0].split('.' )[-2]
UpperCAmelCase : Optional[int] = mapped_key.replace('*' , UpperCAmelCase_ )
if "weight_g" in name:
UpperCAmelCase : List[Any] = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase : Union[str, Any] = 'weight_v'
elif "weight_ih_l0" in name:
UpperCAmelCase : Any = 'weight_ih_l0'
elif "weight_hh_l0" in name:
UpperCAmelCase : Union[str, Any] = 'weight_hh_l0'
elif "bias_ih_l0" in name:
UpperCAmelCase : Union[str, Any] = 'bias_ih_l0'
elif "bias_hh_l0" in name:
UpperCAmelCase : List[Any] = 'bias_hh_l0'
elif "weight_ih_l1" in name:
UpperCAmelCase : Union[str, Any] = 'weight_ih_l1'
elif "weight_hh_l1" in name:
UpperCAmelCase : List[Any] = 'weight_hh_l1'
elif "bias_ih_l1" in name:
UpperCAmelCase : List[Any] = 'bias_ih_l1'
elif "bias_hh_l1" in name:
UpperCAmelCase : Union[str, Any] = 'bias_hh_l1'
elif "bias" in name:
UpperCAmelCase : Optional[int] = 'bias'
elif "weight" in name:
UpperCAmelCase : Union[str, Any] = 'weight'
elif "running_mean" in name:
UpperCAmelCase : Any = 'running_mean'
elif "running_var" in name:
UpperCAmelCase : Optional[int] = 'running_var'
elif "num_batches_tracked" in name:
UpperCAmelCase : Dict = 'num_batches_tracked'
else:
UpperCAmelCase : Tuple = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , ):
if config_path is not None:
UpperCAmelCase : Dict = EncodecConfig.from_pretrained(UpperCAmelCase_ )
else:
UpperCAmelCase : List[Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
UpperCAmelCase : str = [8, 5, 4, 4]
UpperCAmelCase : str = [2.2]
UpperCAmelCase : List[Any] = 64
UpperCAmelCase : Tuple = 3_20_00
UpperCAmelCase : Tuple = 20_48
UpperCAmelCase : Any = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Optional[Any] = False
elif model_name == "encodec_48khz":
UpperCAmelCase : List[Any] = [8, 5, 4, 2]
UpperCAmelCase : Union[str, Any] = [3.0, 6.0, 12.0, 24.0]
UpperCAmelCase : Dict = 4_80_00
UpperCAmelCase : str = 2
UpperCAmelCase : Dict = False
UpperCAmelCase : List[str] = 'time_group_norm'
UpperCAmelCase : Any = True
UpperCAmelCase : Any = 1.0
UpperCAmelCase : List[Any] = 0.01
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCAmelCase : Dict = EncodecModel(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(UpperCAmelCase_ )
UpperCAmelCase : str = torch.load(UpperCAmelCase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
UpperCAmelCase : Dict = original_checkpoint['best_state']
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(UpperCAmelCase_ )
model.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
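# Example invocation, not part of the original file (the script file name is a
# placeholder; the checkpoint file matches the URLs in the comments above):
#
# python convert_encodec_checkpoint.py \
#     --model encodec_24khz \
#     --checkpoint_path encodec_24khz-d7cc33bc.th \
#     --pytorch_dump_folder_path ./encodec_24khz_converted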
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
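# A minimal usage sketch, not part of the original file (upstream this class is
# `GPTNeoXTokenizerFast`; the checkpoint id comes from the pretrained map above):
#
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# ids = tokenizer("Hello world").input_ids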
| 695 | 0 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def UpperCamelCase( lat1 , lon1 , lat2 , lon2 ):
    # Account for the Earth's flattening when converting geodetic latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
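    # Example call (illustrative coordinates), returning the distance in meters
    # between San Francisco and New York, roughly 4.13e6:
    # UpperCamelCase(37.7749, -122.4194, 40.7128, -74.0060)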
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
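# Usage sketch (hedged): the dummy LibriSpeech split is an assumption used only to obtain
# a 16 kHz waveform; any 1-D float array of audio samples would work the same way.
if __name__ == "__main__":
    from datasets import load_dataset  # assumed to be installed
    ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    tool = SpeechToTextTool()
    print(tool(ds[0]["audio"]["array"]))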
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
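# Worked example (hedged, follows from the toy vocabulary written in setUp above):
# WordPiece greedily matches the longest known prefix, so "unwanted" tokenizes to
# "un" + "##want" + "##ed", which map to ids 7, 4 and 5 in that vocabulary.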
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1_500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22_050, time_duration: float = 5.0, frequency: int = 220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-3
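# Quick sketch (hedged): constructing the config is cheap and downloads nothing; the
# defaults above describe a small test-sized architecture rather than a released checkpoint.
if __name__ == "__main__":
    config = WhisperConfig()
    print(config.d_model, config.encoder_layers, config.num_mel_bins)  # 256 6 80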
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs, ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
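# Usage sketch (hedged): a random uint8 image stands in for real data; 480 and 640 are
# already multiples of 32, so only rescaling and the channel layout change here.
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    processor = GLPNImageProcessor(size_divisor=32)
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 480, 640)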
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_gpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file', default=default_json_config_file, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), dest='save_location', )
    parser.add_argument(
        '--mixed_precision', choices=['no', 'fp16', 'bf16'], type=str, help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.', default='no', )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
'''simple docstring'''
def solution(power: int = 10_00) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
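# Worked example (hedged): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.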
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowercase__ = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
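# Migration sketch (hedged): new code should instantiate the image processor directly;
# the shim above exists only so legacy imports keep working with a deprecation warning.
#     processor = LayoutLMv2ImageProcessor(apply_ocr=False)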
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    '''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase_ : Tuple = """google/pegasus-xsum"""
@cached_property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
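# Running these tests (hedged sketch; the path assumes a transformers source checkout):
#     python -m pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k "decoder_model_past"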
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    '''simple docstring'''
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        '''simple docstring'''
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False, ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train', tokenizer.__class__.__name__, str(max_seq_length), task, ), )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info('Training examples: %s', len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info('Saving features into cached file %s', cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        '''simple docstring'''
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False, ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                    if ex_index % 10_000 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # dtype choice assumed: int32 token features with an int64 label
            self.dataset = tf.data.Dataset.from_generator(
                gen, (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ), (
                    {
                        'example_id': tf.TensorShape([]),
                        'input_ids': tf.TensorShape([None, None]),
                        'attention_mask': tf.TensorShape([None, None]),
                        'token_type_ids': tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ), )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    '''simple docstring'''
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex') else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10_000 == 0:
            logger.info('Writing example %d' % (ex_index))
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding='max_length', truncation=True, return_overflowing_tokens=True, )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")
    return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
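# Usage sketch (hedged): `data_dir` must already contain the HANS tsv files
# (heuristics_train_set.txt / heuristics_evaluation_set.txt); shown commented because
# the data files and a PyTorch install are required.
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dev = HansDataset("hans_data", tokenizer, task="hans", max_seq_length=128, evaluate=True)
#     print(len(dev), dev.get_labels())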
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    # Normalizes a scalar (e.g. an int image size) into a (height, width) pair.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
'''simple docstring'''
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1E-5)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1E-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    '''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-random-bert')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    '''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf', 'hf-internal-testing/tiny-random-roberta')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name='vision_model')
        text_model = TFRobertaModel(text_config, name='text_model')
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        deit_model_tester = TFDeiTModelTester(self)
        roberta_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = deit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = roberta_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    '''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf', 'hf-internal-testing/tiny-random-bert')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian', logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
        expected_logits = np.array([[1.228_4727, 0.310_4122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1E-3))
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
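# Worked example (hedged): the expansion runs 3/2, 7/5, 17/12, 41/29, 99/70, 239/169,
# 577/408, 1393/985, ...; 1393/985 (term 8) is the first with a longer numerator than
# denominator, so solution(8) == 1.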
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")
DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
class CheckDummiesTester(unittest.TestCase):
'''simple docstring'''
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend('    if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')
        backend_with_underscore = find_backend('    if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')
        double_backend = find_backend('    if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')
        double_backend_with_underscore = find_backend(
            '    if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')
        triple_backend = find_backend(
            '    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase : Optional[int] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
UpperCAmelCase : int = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , lowercase_ )
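# Hedged illustration of the pattern the templates above generate: dummy classes
# use a metaclass so that *any* use of the class raises a helpful ImportError.
# `requires_backends` below is a simplified stand-in for the real helper in
# transformers.utils, included only to keep the sketch self-contained.
def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the {backends} backend(s), which are not installed.")
class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)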
| 710 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
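# Hand-checked examples (hedged: added for illustration, not part of the
# original solution): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26
assert solution(0) == 1  # 2**0 = 1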
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 695 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase__ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
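# Example invocation (hypothetical paths, shown only to document the CLI defined above):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/rembert/model.ckpt \
#     --rembert_config_file /tmp/rembert/config.json \
#     --pytorch_dump_path /tmp/rembert/pytorch_model.bin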
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
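# Hedged usage sketch: composing the three sub-configs defined above. When
# text_config is omitted, the text backbone defaults to OPT via CONFIG_MAPPING.
# config = Blip2Config.from_vision_qformer_text_configs(
#     Blip2VisionConfig(), Blip2QFormerConfig(), CONFIG_MAPPING["opt"]()
# )
# assert config.num_query_tokens == 32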
| 695 | 0 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    '''simple docstring'''
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
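# Standalone illustration (hedged, illustrative values) of the key-prefixing
# loop used by both `evaluate` and `predict` above:
_example_metrics = {"exact_match": 81.2, "eval_loss": 1.08}
for _key in list(_example_metrics.keys()):
    if not _key.startswith("eval_"):
        _example_metrics[f"eval_{_key}"] = _example_metrics.pop(_key)
assert _example_metrics == {"eval_loss": 1.08, "eval_exact_match": 81.2}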
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
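# `calculate_bleu` is imported from the examples' utils module; a minimal
# equivalent built on sacrebleu would look like this (hedged sketch, assuming
# corpus-level BLEU over detokenized sentences):
#
#   from sacrebleu import corpus_bleu
#
#   def calculate_bleu(output_lns, refs_lns):
#       return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}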
| 695 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime > 3 is of the form 6k +/- 1.

    >>> is_prime(13)
    True
    >>> is_prime(21)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: return the side length of the square spiral at which
    the ratio of primes along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
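# Hand-checkable example (hedged addition): with ratio 0.5 the loop stops at
# side length 11, where the diagonals hold 10 primes among 21 corner values.
assert solution(0.5) == 11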
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "pix2struct_vision_model"
    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "pix2struct"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
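# Hedged usage sketch of the composition helper above:
# config = Pix2StructConfig.from_text_vision_configs(Pix2StructTextConfig(), Pix2StructVisionConfig())
# assert config.model_type == "pix2struct"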
| 695 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
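# Quick sanity check (hedged: assumes IIRFilter stores the raw coefficient
# lists as `a_coeffs` / `b_coeffs`). A biquad lowpass must pass DC with unit
# gain, i.e. H(z=1) = sum(b) / sum(a) == 1.
if __name__ == "__main__":
    _lowpass = make_lowpass(1_000, 48_000)
    assert abs(sum(_lowpass.b_coeffs) / sum(_lowpass.a_coeffs) - 1.0) < 1e-9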
| 714 |
'''simple docstring'''
import base64
def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))
def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")
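# Round-trip property (hedged check added for illustration):
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"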
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
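# Worked example (hedged, added for illustration): the nearest neighbour of
# (0, 0.1) below is (0, 0) at Euclidean distance 0.1.
_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
_queries = np.array([[0.0, 0.1]])
_result = similarity_search(_dataset, _queries)
assert _result[0][0] == [0.0, 0.0] and abs(_result[0][1] - 0.1) < 1e-12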
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "deberta-v2"
    def __init__(self, vocab_size=128_100, hidden_size=1_536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6_144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
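# Hedged usage sketch: because deberta-v2 defaults to type_vocab_size=0, the
# ONNX input mapping above omits token_type_ids and resolves to
# {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}}.
# config = DebertaV2Config()  # deberta-v2 defaults: 24 layers, hidden size 1536
# onnx_config = DebertaV2OnnxConfig(config)
# list(onnx_config.inputs.keys())  # ["input_ids", "attention_mask"]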
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = MBartaaTokenizer
UpperCAmelCase_ : int = MBartaaTokenizerFast
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : str = True
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : int = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase : str = '<s>'
UpperCAmelCase : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
UpperCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(lowercase_ ) , 1_054 )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : List[Any] = MBartaaTokenizer(lowercase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=lowercase_ )
UpperCAmelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
# fmt: off
UpperCAmelCase : Dict = {'input_ids': [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : List[Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(lowercase_ )
UpperCAmelCase : Optional[Any] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
UpperCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase : Tuple = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : int = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
UpperCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
UpperCAmelCase : Tuple = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase : str = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : str = tokenizer_r.from_pretrained(lowercase_ )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """facebook/mbart-large-50-one-to-many-mmt"""
UpperCAmelCase_ : Optional[int] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCAmelCase_ : Any = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCAmelCase_ : int = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def UpperCAmelCase_ ( cls : List[str] ) -> Any:
UpperCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
UpperCAmelCase : Any = 1
return cls
def UpperCAmelCase_ ( self : int ) -> List[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 250_038 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
self.assertIn(lowercase_ , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
UpperCAmelCase : Dict = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
UpperCAmelCase : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertNotIn(self.tokenizer.eos_token , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> int:
UpperCAmelCase : Any = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , lowercase_ )
UpperCAmelCase : str = 10
UpperCAmelCase : List[str] = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[0] , lowercase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowercase_ ) , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250_053, 250_001] )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
UpperCAmelCase : Tuple = tempfile.mkdtemp()
UpperCAmelCase : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = MBartaaTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase_ , return_tensors='pt' )
UpperCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
UpperCAmelCase : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
UpperCAmelCase : Dict = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' )
UpperCAmelCase : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=10 , return_tensors='pt' )
UpperCAmelCase : List[str] = targets['input_ids']
UpperCAmelCase : List[Any] = shift_tokens_right(lowercase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
UpperCAmelCase : Any = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(lowercase_ ) , {
# en_XX, A, test, EOS
'input_ids': [[250_004, 62, 3_034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
} , )
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ ):
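    # Query downloadgram's JSON endpoint for the direct source link (the
    # response is assumed to be a list of results, each carrying candidate
    # 'urls'), then fetch and return the raw video bytes.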
UpperCAmelCase : Tuple = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCAmelCase : List[str] = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(UpperCAmelCase_ ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
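# Lazy import structure: the heavy PyTorch/TensorFlow model classes below are
# only imported when the corresponding backend is actually installed.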
lowercase__ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
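    # Sum the perimeters of almost-equilateral triangles (sides a, a, a +/- 1)
    # with integral area, generated by a Pell-like recurrence -- the classic
    # approach to Project Euler problem 94.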
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """data2vec-vision"""
def __init__( self : int , lowercase_ : List[str]=768 , lowercase_ : Dict=12 , lowercase_ : int=12 , lowercase_ : str=3_072 , lowercase_ : Optional[int]="gelu" , lowercase_ : Any=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : int=1E-12 , lowercase_ : int=224 , lowercase_ : Dict=16 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=False , lowercase_ : Dict=False , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : int=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=True , lowercase_ : Optional[int]=[3, 5, 7, 11] , lowercase_ : str=[1, 2, 3, 6] , lowercase_ : Any=True , lowercase_ : List[str]=0.4 , lowercase_ : List[str]=256 , lowercase_ : int=1 , lowercase_ : str=False , lowercase_ : str=255 , **lowercase_ : Dict , ) -> Optional[Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Tuple = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : str = image_size
UpperCAmelCase : Tuple = patch_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : Dict = use_mask_token
UpperCAmelCase : int = use_absolute_position_embeddings
UpperCAmelCase : Optional[int] = use_relative_position_bias
UpperCAmelCase : Optional[Any] = use_shared_relative_position_bias
UpperCAmelCase : Union[str, Any] = layer_scale_init_value
UpperCAmelCase : Optional[int] = drop_path_rate
UpperCAmelCase : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCAmelCase : Optional[Any] = out_indices
UpperCAmelCase : List[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase : Optional[int] = use_auxiliary_head
UpperCAmelCase : Optional[Any] = auxiliary_loss_weight
UpperCAmelCase : str = auxiliary_channels
UpperCAmelCase : Any = auxiliary_num_convs
UpperCAmelCase : int = auxiliary_concat_input
UpperCAmelCase : Union[str, Any] = semantic_loss_ignore_index
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = version.parse("""1.11""" )
@property
def UpperCAmelCase_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self : int ) -> float:
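        # Absolute tolerance used when validating the ONNX export's outputs
        # against the PyTorch reference.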
return 1E-4
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
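        # Compare a 3x3 corner patch of the generated image against hard-coded
        # reference values; MPS needs a looser tolerance than CPU/CUDA.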
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
lowercase__ = {"allegro/herbert-base-cased": 514}
lowercase__ = {}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = VOCAB_FILES_NAMES
UpperCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Union[str, Any] = HerbertTokenizer
def __init__( self : Optional[Any] , lowercase_ : List[Any]=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None , lowercase_ : List[str]="<s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Any="<pad>" , lowercase_ : Tuple="<mask>" , lowercase_ : Dict="</s>" , **lowercase_ : str , ) -> Dict:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sep_token=lowercase_ , **lowercase_ , )
def UpperCAmelCase_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
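        # BERT-style special-token format: single sequence -> <s> A </s>,
        # pair of sequences -> <s> A </s> B </s>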
UpperCAmelCase : int = [self.cls_token_id]
UpperCAmelCase : Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : int = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
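        # End-to-end sample from the pretrained 256x256 CelebA-HQ NCSN++ UNet
        # combined with the Karras et al. VE scheduler.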
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
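        # Compare a 3x3 corner patch of the generated image against hard-coded
        # reference values; MPS needs a looser tolerance than CPU/CUDA.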
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
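        # Model input dimension: one copy of the target per lag plus all static,
        # dynamic and time covariate features (see _number_of_features below).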
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 695 | 0 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowercase__ : Tuple = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowercase__ : List[str] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowercase__ : List[str] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Tuple=None , lowercase_ : str=None , lowercase_ : Optional[int]=False ) -> str:
if concatenate_texts:
return compute_measures(lowercase_ , lowercase_ )["wer"]
else:
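            # Corpus-level WER: accumulate edit operations and reference word
            # counts across all pairs instead of averaging per-sentence rates.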
UpperCAmelCase : int = 0
UpperCAmelCase : List[str] = 0
for prediction, reference in zip(lowercase_ , lowercase_ ):
UpperCAmelCase : List[str] = compute_measures(lowercase_ , lowercase_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 700 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
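    # Character-level n-grams by slicing: e.g. ngram_size=2 on "abcd" yields
    # ["ab", "bc", "cd"].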
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : Tuple = tempfile.mkdtemp()
UpperCAmelCase : List[Any] = BlipImageProcessor()
UpperCAmelCase : str = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
UpperCAmelCase : Optional[Any] = BlipProcessor(lowercase_ , lowercase_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : List[Any] , **lowercase_ : int ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).tokenizer
def UpperCAmelCase_ ( self : int , **lowercase_ : Union[str, Any] ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
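        # Build a single random RGB image as a PIL Image for the processor tests.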
UpperCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase : str = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCAmelCase : str = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
UpperCAmelCase : List[str] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Any:
UpperCAmelCase : List[Any] = self.get_image_processor()
UpperCAmelCase : List[str] = self.get_tokenizer()
UpperCAmelCase : int = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase : str = self.prepare_image_inputs()
UpperCAmelCase : Dict = image_processor(lowercase_ , return_tensors='np' )
UpperCAmelCase : Optional[Any] = processor(images=lowercase_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.get_image_processor()
UpperCAmelCase : List[Any] = self.get_tokenizer()
UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase : Tuple = 'lower newer'
UpperCAmelCase : Tuple = processor(text=lowercase_ )
UpperCAmelCase : List[Any] = tokenizer(lowercase_ , return_token_type_ids=lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = self.get_image_processor()
UpperCAmelCase : Optional[int] = self.get_tokenizer()
UpperCAmelCase : str = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase : List[str] = 'lower newer'
UpperCAmelCase : Any = self.prepare_image_inputs()
UpperCAmelCase : List[Any] = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : str = self.get_image_processor()
UpperCAmelCase : Any = self.get_tokenizer()
UpperCAmelCase : Optional[Any] = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase : List[str] = processor.batch_decode(lowercase_ )
UpperCAmelCase : str = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = self.get_image_processor()
UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase : Dict = BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
UpperCAmelCase : Optional[int] = 'lower newer'
UpperCAmelCase : List[str] = self.prepare_image_inputs()
UpperCAmelCase : Optional[Any] = processor(text=lowercase_ , images=lowercase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
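# Second-order (biquad) IIR filter designers following the well-known RBJ Audio
# EQ Cookbook: low-pass, high-pass, band-pass, all-pass, peaking EQ, low-shelf
# and high-shelf, in that order.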
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
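    # Convert the dB gain into the cookbook's linear amplitude parameter A.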
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 695 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
lowercase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowercase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowercase__ = [2, 4, 1, 5]
lowercase__ = len(train_data)
lowercase__ = 0.009
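# Batch gradient descent on a linear hypothesis with three input features;
# parameter_vector[0] acts as the bias term.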
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_="train" ):
return calculate_hypothesis_value(UpperCAmelCase_ , UpperCAmelCase_ ) - output(
UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : int = 0
for i in range(len(UpperCAmelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_=m ):
UpperCAmelCase : Dict = 0
for i in range(UpperCAmelCase_ ):
if index == -1:
summation_value += _error(UpperCAmelCase_ )
else:
summation_value += _error(UpperCAmelCase_ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : str = summation_of_cost_derivative(UpperCAmelCase_ , UpperCAmelCase_ ) / m
return cost_derivative_value
def UpperCamelCase( ):
global parameter_vector
    # Absolute tolerance: iteration stops once successive parameter vectors
    # agree within this value
UpperCAmelCase : List[str] = 0.00_0002
UpperCAmelCase : str = 0
UpperCAmelCase : str = 0
while True:
j += 1
UpperCAmelCase : Any = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase_ ) ):
UpperCAmelCase : List[Any] = get_cost_derivative(i - 1 )
UpperCAmelCase : Union[str, Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ , rtol=UpperCAmelCase_ , ):
break
UpperCAmelCase : List[Any] = temp_parameter_vector
print(('Number of iterations:', j) )
def UpperCamelCase( ):
for i in range(len(UpperCAmelCase_ ) ):
print(('Actual output value:', output(UpperCAmelCase_ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(UpperCAmelCase_ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class A_ ( Generic[T] ):
'''simple docstring'''
UpperCAmelCase_ : deque[T] # Cache store of keys
UpperCAmelCase_ : set[T] # References of the keys in cache
UpperCAmelCase_ : int = 10 # Maximum capacity of cache
def __init__( self : List[Any] , lowercase_ : int ) -> None:
UpperCAmelCase : Any = deque()
UpperCAmelCase : Dict = set()
if not n:
UpperCAmelCase : Optional[int] = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCAmelCase : str = n
def UpperCAmelCase_ ( self : List[str] , lowercase_ : T ) -> None:
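        # On a miss with a full cache, evict the least-recently-used key from the
        # right end of the deque; on a hit, move the key back to the front.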
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
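        # End-to-end sample from the pretrained 256x256 CelebA-HQ NCSN++ UNet
        # combined with the Karras et al. VE scheduler.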
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
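        # Flatten a Conversation into token ids, appending EOS after every turn
        # and keeping only the most recent model_max_length tokens.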
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
| 695 | 0 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def UpperCamelCase( UpperCAmelCase_ ):
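    # Liouville-style sign: -1 when the number has an odd count of prime
    # factors, +1 otherwise (assuming prime_factors counts multiplicity).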
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = F"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCAmelCase_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
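        # Convert raw audio into log-mel input features for the Whisper encoder.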
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 1_000 ):
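    # Project Euler 57: count the expansions of the continued fraction for
    # sqrt(2) whose numerator has more digits than the denominator.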
UpperCAmelCase : Any = 1, 1
UpperCAmelCase : Any = []
for i in range(1 , n + 1 ):
UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
UpperCAmelCase : Any = prev_numerator + prev_denominator
if len(str(UpperCAmelCase_ ) ) > len(str(UpperCAmelCase_ ) ):
result.append(UpperCAmelCase_ )
UpperCAmelCase : Dict = numerator
UpperCAmelCase : Dict = denominator
return len(UpperCAmelCase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
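# Ids of non-speech tokens suppressed during generation, for the two Whisper
# vocabulary variants (English-only and multilingual).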
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
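        # Generate dummy audio features for the encoder, then reuse the seq2seq
        # helper for the decoder ids; with past key-values the effective encoder
        # sequence length is halved.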
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
| 695 | 0 |
'''simple docstring'''
import pytest
lowercase__ = "__dummy_dataset1__"
lowercase__ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
DATASET_LOADING_SCRIPT_CODE = lowercase__  # alias for the dataset script source defined just above
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F"""{script_name}.py"""
    with open(script_path , 'w' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
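# Editor's sketch: how the fixtures above are typically consumed. Loading a dataset
# from a local script directory is standard `datasets` behavior; this test body is a
# hypothetical illustration, not part of the original file.
def test_dummy_dataset_script_is_loadable(dataset_loading_script_dir):
    from datasets import load_dataset

    dset = load_dataset(dataset_loading_script_dir, split='train')
    assert 'tokens' in dset.column_names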
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def default_command_parser( parser , parents ):
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""" )
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
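# Editor's sketch: a minimal imitation of the lazy-import pattern above. Only the
# `_import_structure` mapping is built at import time; submodules are imported on first
# attribute access. Illustrative, not the transformers `_LazyModule` implementation.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        # assumes the proxy is installed under a package name so relative imports resolve
        if attr in self._symbol_to_module:
            submodule = importlib.import_module('.' + self._symbol_to_module[attr], self.__name__)
            return getattr(submodule, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')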
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor ( LayoutLMv2ImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 695 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """wavlm"""
def __init__( self : Tuple , lowercase_ : List[str]=32 , lowercase_ : Union[str, Any]=768 , lowercase_ : Dict=12 , lowercase_ : Any=12 , lowercase_ : List[Any]=3_072 , lowercase_ : str="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : str=1E-5 , lowercase_ : Optional[Any]="group" , lowercase_ : int="gelu" , lowercase_ : Any=(512, 512, 512, 512, 512, 512, 512) , lowercase_ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=128 , lowercase_ : Union[str, Any]=16 , lowercase_ : Tuple=320 , lowercase_ : Tuple=800 , lowercase_ : Dict=False , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=0.05 , lowercase_ : Tuple=10 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : str=320 , lowercase_ : Optional[Any]=2 , lowercase_ : List[Any]=0.1 , lowercase_ : str=100 , lowercase_ : List[Any]=256 , lowercase_ : List[str]=256 , lowercase_ : int=0.1 , lowercase_ : int="mean" , lowercase_ : Any=False , lowercase_ : str=False , lowercase_ : str=256 , lowercase_ : Dict=(512, 512, 512, 512, 1_500) , lowercase_ : Union[str, Any]=(5, 3, 3, 1, 1) , lowercase_ : Any=(1, 2, 3, 1, 1) , lowercase_ : Any=512 , lowercase_ : Any=80 , lowercase_ : Union[str, Any]=0 , lowercase_ : Any=1 , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=False , lowercase_ : List[Any]=3 , lowercase_ : Tuple=2 , lowercase_ : Union[str, Any]=3 , lowercase_ : Any=None , **lowercase_ : Dict , ) -> Optional[int]:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : List[str] = feat_extract_norm
UpperCAmelCase : List[str] = feat_extract_activation
UpperCAmelCase : Any = list(lowercase_ )
UpperCAmelCase : int = list(lowercase_ )
UpperCAmelCase : List[str] = list(lowercase_ )
UpperCAmelCase : Optional[int] = conv_bias
UpperCAmelCase : Optional[int] = num_buckets
UpperCAmelCase : Optional[int] = max_bucket_distance
UpperCAmelCase : Tuple = num_conv_pos_embeddings
UpperCAmelCase : Optional[int] = num_conv_pos_embedding_groups
UpperCAmelCase : List[str] = len(self.conv_dim )
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Optional[int] = hidden_dropout
UpperCAmelCase : Optional[int] = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : int = feat_proj_dropout
UpperCAmelCase : Any = final_dropout
UpperCAmelCase : Any = layerdrop
UpperCAmelCase : Dict = layer_norm_eps
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Tuple = num_ctc_classes
UpperCAmelCase : int = vocab_size
UpperCAmelCase : str = do_stable_layer_norm
UpperCAmelCase : Tuple = use_weighted_layer_sum
UpperCAmelCase : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Tuple = apply_spec_augment
UpperCAmelCase : Optional[int] = mask_time_prob
UpperCAmelCase : int = mask_time_length
UpperCAmelCase : Optional[int] = mask_time_min_masks
UpperCAmelCase : Tuple = mask_feature_prob
UpperCAmelCase : Dict = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase : str = num_codevector_groups
UpperCAmelCase : Any = contrastive_logits_temperature
UpperCAmelCase : Dict = num_negatives
UpperCAmelCase : Optional[Any] = codevector_dim
UpperCAmelCase : Optional[int] = proj_codevector_dim
UpperCAmelCase : Tuple = diversity_loss_weight
# ctc loss
UpperCAmelCase : List[str] = ctc_loss_reduction
UpperCAmelCase : Optional[int] = ctc_zero_infinity
# adapter
UpperCAmelCase : Dict = add_adapter
UpperCAmelCase : Optional[Any] = adapter_kernel_size
UpperCAmelCase : Dict = adapter_stride
UpperCAmelCase : Optional[Any] = num_adapter_layers
UpperCAmelCase : Tuple = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : str = list(lowercase_ )
UpperCAmelCase : Optional[Any] = list(lowercase_ )
UpperCAmelCase : List[Any] = list(lowercase_ )
UpperCAmelCase : Optional[Any] = xvector_output_dim
@property
def UpperCAmelCase_ ( self : str ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
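# Editor's note: the `inputs_to_logits_ratio`-style property above multiplies the conv
# strides; for the default strides this is one output frame per 320 input samples,
# i.e. roughly 20 ms of 16 kHz audio:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config above
assert functools.reduce(operator.mul, conv_stride, 1) == 320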
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
        if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
lowercase__ = {
"hans": HansProcessor,
}
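# Editor's sketch: the "label indices are swapped" hack above, shown standalone.
# RoBERTa/BART-style MNLI checkpoints order the labels differently, so entries 1 and 2
# of the label list are exchanged before building the label map:
label_list = ['contradiction', 'entailment', 'neutral']
label_list[1], label_list[2] = label_list[2], label_list[1]
label_map = {label: i for i, label in enumerate(label_list)}
assert label_map == {'contradiction': 0, 'neutral': 1, 'entailment': 2}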
| 695 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json"}
lowercase__ = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowercase__ = {"mgp-str": 27}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ) -> None:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
@property
def UpperCAmelCase_ ( self : Any ) -> int:
return len(self.vocab )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[Any]:
        char_tokens = []
        for s in lowercase_ :
            char_tokens.extend(s )
        return char_tokens
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[int] ) -> Optional[Any]:
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self : int , lowercase_ : Union[str, Any] ) -> Optional[int]:
return self.decoder.get(lowercase_ )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
return (vocab_file,)
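# Editor's sketch: the tokenizer above is a plain character-level tokenizer. Its core
# behavior, standalone, with a hypothetical vocabulary (not the released checkpoint):
vocab = {'[GO]': 0, '[s]': 1, 'a': 2, 'b': 3, 'c': 4}

def char_tokenize(text):
    return list(text)  # one token per character, as in the _tokenize method above

ids = [vocab.get(token, vocab['[GO]']) for token in char_tokenize('abc')]
assert ids == [2, 3, 4]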
| 709 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
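# Editor's note: the loop above implements the sqrt(2) continued-fraction recurrence
# n_{k+1} = n_k + 2 * d_k, d_{k+1} = n_k + d_k starting from 1/1, and counts the
# expansions whose numerator gains a digit over the denominator; solution() == 153.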
| 695 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Optional[int] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """openai-gpt"""
UpperCAmelCase_ : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , lowercase_ : Union[str, Any]=40_478 , lowercase_ : Tuple=512 , lowercase_ : List[str]=768 , lowercase_ : int=12 , lowercase_ : Any=12 , lowercase_ : Optional[int]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : str=0.1 , lowercase_ : Dict=0.1 , lowercase_ : str=1E-5 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Tuple="cls_index" , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=None , lowercase_ : Dict=True , lowercase_ : Any=0.1 , **lowercase_ : Union[str, Any] , ) -> Union[str, Any]:
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Tuple = n_positions
UpperCAmelCase : int = n_embd
UpperCAmelCase : Optional[Any] = n_layer
UpperCAmelCase : List[Any] = n_head
UpperCAmelCase : List[str] = afn
UpperCAmelCase : int = resid_pdrop
UpperCAmelCase : Optional[int] = embd_pdrop
UpperCAmelCase : Optional[Any] = attn_pdrop
UpperCAmelCase : Optional[Any] = layer_norm_epsilon
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Union[str, Any] = summary_type
UpperCAmelCase : int = summary_use_proj
UpperCAmelCase : Any = summary_activation
UpperCAmelCase : List[str] = summary_first_dropout
UpperCAmelCase : Optional[Any] = summary_proj_to_labels
super().__init__(**lowercase_ )
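# Editor's sketch: how the `attribute_map` above behaves — reads of canonical names
# (hidden_size) are redirected to model-specific fields (n_embd). Minimal imitation of
# the PretrainedConfig mechanism, illustrative only:
class ConfigWithAliases:
    attribute_map = {'hidden_size': 'n_embd', 'num_hidden_layers': 'n_layer'}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # only reached when normal attribute lookup fails, e.g. for the alias names
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

assert ConfigWithAliases().hidden_size == 768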
| 710 |
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 695 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
lowercase__ = "docs/source/en/_toctree.yml"
def UpperCamelCase( UpperCAmelCase_ ) -> Any:
UpperCAmelCase : List[str] = defaultdict(UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(UpperCAmelCase_ )
UpperCAmelCase : Any = new_doc_list
UpperCAmelCase : List[str] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Any = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(UpperCAmelCase_ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
UpperCAmelCase : int = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCAmelCase_ ) > 1:
        raise ValueError('the doc list has two \'overview\' docs, which is not allowed.' )
overview_doc.extend(UpperCAmelCase_ )
# Sort
return overview_doc
def UpperCamelCase( UpperCAmelCase_=False ) -> Dict:
with open(UpperCAmelCase_ , encoding='utf-8' ) as f:
UpperCAmelCase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : List[str] = content[api_idx]['sections']
# Then to the model doc
UpperCAmelCase : str = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCAmelCase : int = api_doc[scheduler_idx]['sections']
UpperCAmelCase : Tuple = clean_doc_toc(UpperCAmelCase_ )
UpperCAmelCase : List[str] = False
if new_scheduler_doc != scheduler_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Optional[Any] = new_scheduler_doc
if diff:
if overwrite:
UpperCAmelCase : Optional[int] = api_doc
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase( UpperCAmelCase_=False ) -> Union[str, Any]:
with open(UpperCAmelCase_ , encoding='utf-8' ) as f:
UpperCAmelCase : List[str] = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : int = content[api_idx]['sections']
# Then to the model doc
UpperCAmelCase : Union[str, Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Union[str, Any] = api_doc[pipeline_idx]['sections']
UpperCAmelCase : List[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCAmelCase : int = pipeline_doc['section']
UpperCAmelCase : List[str] = clean_doc_toc(UpperCAmelCase_ )
if overwrite:
UpperCAmelCase : List[str] = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCAmelCase_ )
# sort overall pipeline doc
UpperCAmelCase : Optional[Any] = clean_doc_toc(UpperCAmelCase_ )
if new_pipeline_docs != pipeline_docs:
UpperCAmelCase : Any = True
if overwrite:
UpperCAmelCase : str = new_pipeline_docs
if diff:
if overwrite:
UpperCAmelCase : Tuple = api_doc
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase__ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
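# Editor's sketch: the core of clean_doc_toc above on toy data — duplicates (same
# "local") collapse when titles agree, entries sort by title, and "Overview" is pinned
# first. Hypothetical entries for illustration:
docs = [
    {'local': 'overview', 'title': 'Overview'},
    {'local': 'ddpm', 'title': 'DDPM'},
    {'local': 'ddim', 'title': 'DDIM'},
    {'local': 'ddim', 'title': 'DDIM'},
]
seen = set()
deduped = [d for d in docs if not (d['local'] in seen or seen.add(d['local']))]
cleaned = [d for d in deduped if d['title'].lower() == 'overview'] + sorted(
    (d for d in deduped if d['title'].lower() != 'overview'), key=lambda d: d['title'].lower()
)
assert [d['local'] for d in cleaned] == ['overview', 'ddim', 'ddpm']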
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
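# Editor's sketch: composing the config above from its parts via the classmethod
# (standard transformers API for BLIP-2; commented out since it needs transformers):
#
#     from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#     config = Blip2Config.from_vision_qformer_text_configs(
#         Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#     )
#     assert config.num_query_tokens == 32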
| 695 | 0 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowercase__ = "path-to-your-trained-model"
lowercase__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
lowercase__ = "A photo of sks dog in a bucket"
lowercase__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
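# Editor's note: `calculate_bleu` imported above lives in the seq2seq examples' utils
# and is, to a first approximation, a thin sacrebleu wrapper (assumption):
#
#     import sacrebleu
#     def calculate_bleu(output_lns, refs_lns):
#         return {'bleu': round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}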
| 695 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Any , lowercase_ : Union[str, Any]=13 , lowercase_ : int=7 , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[int]=True , lowercase_ : int=False , lowercase_ : List[Any]=True , lowercase_ : List[str]=33 , lowercase_ : Tuple=32 , lowercase_ : Dict=5 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=37 , lowercase_ : Any="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : List[str]=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=None , ) -> Optional[Any]:
UpperCAmelCase : Any = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : List[str] = seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : Optional[Any] = use_input_mask
UpperCAmelCase : List[str] = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : List[str] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : str = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Optional[Any] = num_labels
UpperCAmelCase : int = num_choices
UpperCAmelCase : Any = scope
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Dict = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> Any:
UpperCAmelCase : Tuple = EsmModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : Dict = model(lowercase_ , attention_mask=lowercase_ )
UpperCAmelCase : Dict = model(lowercase_ )
UpperCAmelCase : Tuple = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[str] ) -> Any:
UpperCAmelCase : List[Any] = EsmForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = EsmForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Dict ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : int = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : Tuple = ()
UpperCAmelCase_ : Any = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : List[str] = True
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = EsmModelTester(self )
UpperCAmelCase : Any = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : str = type
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> List[str]:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : int ) -> List[str]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[int] = EsmModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase : Union[str, Any] = EsmEmbeddings(config=lowercase_ )
UpperCAmelCase : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
UpperCAmelCase : List[str] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
UpperCAmelCase : Dict = create_position_ids_from_input_ids(lowercase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_ , lowercase_ ) ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()[0]
UpperCAmelCase : Optional[int] = EsmEmbeddings(config=lowercase_ )
UpperCAmelCase : List[Any] = torch.empty(2 , 4 , 30 )
UpperCAmelCase : Optional[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
UpperCAmelCase : Optional[Any] = torch.as_tensor([expected_single_positions, expected_single_positions] )
UpperCAmelCase : int = embeddings.create_position_ids_from_inputs_embeds(lowercase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(lowercase_ , lowercase_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
pass
@require_torch
class A_ ( _snake_case ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
with torch.no_grad():
UpperCAmelCase : List[str] = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
UpperCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase : List[Any] = model(lowercase_ )[0]
UpperCAmelCase : Union[str, Any] = 33
UpperCAmelCase : Optional[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase : List[Any] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
with torch.no_grad():
UpperCAmelCase : List[Any] = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
UpperCAmelCase : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase : Tuple = model(lowercase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase : List[str] = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
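# Editor's sketch: a standalone reimplementation of create_position_ids_from_input_ids
# matching the expectations in the tests above (real tokens numbered from
# padding_idx + 1, padding kept at padding_idx). Illustrative only.
import torch

def make_position_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

assert make_position_ids(torch.tensor([[12, 31, 13, 1]]), 1).tolist() == [[2, 3, 4, 1]]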
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
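# Quick, hedged sanity checks for the converter above (only as exact as the
# rounded factors in METRIC_CONVERSION):
if __name__ == "__main__":
    assert UpperCamelCase(1 , 'cubicmeter' , 'litre' ) == 1000
    assert abs(UpperCamelCase(1 , 'litre' , 'gallon' ) - 0.264172 ) < 1e-6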
| 714 |
'''simple docstring'''
import base64  # the snippet imported a mangled module name; base64 provides b85encode/b85decode
def baseaa_encode( string: str ) -> bytes:
    return base64.b85encode(string.encode('utf-8' ) )
def baseaa_decode( encoded: bytes ) -> str:
    return base64.b85decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
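# Round-trip sanity check for the helpers above:
assert baseaa_decode(baseaa_encode('Hello World!' ) ) == 'Hello World!'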
| 695 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A_ ( Trainer ):
    '''simple docstring'''
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix: str = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
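# Hedged wiring sketch for the trainer above. Every right-hand-side name
# (model, training_args, datasets, post-processing and metric functions) is a
# placeholder from a typical QA fine-tuning script, not defined here:
# trainer = A_(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate()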
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        # start with the first dataset row as the current nearest neighbour
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
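# Hedged usage sketch for the helpers above (numpy only); the expected outputs
# in the comments are computed from the definitions, not quoted from the source:
if __name__ == "__main__":
    sample_dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]] )
    sample_values = np.array([[0, 0, 1]] )
    print(similarity_search(sample_dataset , sample_values ) )  # [[[0, 0, 0], 1.0]]
    print(cosine_similarity(np.array([1, 2] ) , np.array([6, 32] ) ) )  # ~0.9615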
| 695 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
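# The default_factory lambda above sidesteps dataclasses' ban on mutable
# defaults: e.g. `foo: List[int] = list_field(default=[1, 2, 3])` gives every
# instance its own fresh list rather than one shared between instances.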
@dataclass
class BasicExample :
    '''simple docstring'''
    foo : int
    bar : float
    baz : str
    flag : bool
@dataclass
class WithDefaultExample :
    '''simple docstring'''
    foo : int = 42
    baz : str = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class WithDefaultBoolExample :
    '''simple docstring'''
    foo : bool = False
    baz : bool = True
    opt : Optional[bool] = None
class BasicEnum ( Enum ):
    '''simple docstring'''
    titi = """titi"""
    toto = """toto"""
class MixedTypeEnum ( Enum ):
    '''simple docstring'''
    titi = """titi"""
    toto = """toto"""
    fourtytwo = 42
@dataclass
class EnumExample :
    '''simple docstring'''
    foo : BasicEnum = "toto"
    def __post_init__( self : List[Any] ) -> None:
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample :
    '''simple docstring'''
    foo : MixedTypeEnum = "toto"
    def __post_init__( self : Optional[int] ) -> None:
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample :
    '''simple docstring'''
    foo : Optional[int] = None
    bar : Optional[float] = field(default=None , metadata={"""help""": """help message"""} )
    baz : Optional[str] = None
    ces : Optional[List[str]] = list_field(default=[] )
    des : Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample :
    '''simple docstring'''
    foo_int : List[int] = list_field(default=[] )
    bar_int : List[int] = list_field(default=[1, 2, 3] )
    foo_str : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
    foo_float : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample :
    '''simple docstring'''
    required_list : List[int] = field()
    required_str : str = field()
    required_enum : BasicEnum = field()
    def __post_init__( self : str ) -> None:
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample :
    '''simple docstring'''
    foo : int
    required_enum : "BasicEnum" = field()
    opt : "Optional[bool]" = None
    baz : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} )
    foo_str : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604 :
        '''simple docstring'''
        foo : bool = False
        baz : bool = True
        opt : bool | None = None
    @dataclass
    class OptionalExamplePep604 :
        '''simple docstring'''
        foo : int | None = None
        bar : float | None = field(default=None , metadata={"""help""": """help message"""} )
        baz : str | None = None
        ces : list[str] | None = list_field(default=[] )
        des : list[int] | None = list_field(default=[] )
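# Note on the PEP 604 dataclasses above: on Python 3.10+ an annotation like
# `int | None` compares equal to typing.Optional[int], so HfArgumentParser can
# treat both spellings of an optional field identically.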
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ) -> int:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCAmelCase : Optional[Any] = {k: v for k, v in vars(lowercase_ ).items() if k != 'container'}
UpperCAmelCase : List[Any] = {k: v for k, v in vars(lowercase_ ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_ ) and yy.get('choices' , lowercase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_ ) , yy['type'](lowercase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?' )
self.argparsersEqual(lowercase_ , lowercase_ )
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
self.assertFalse(example.flag )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : List[str] = HfArgumentParser(lowercase_ )
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_ )
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message' )
self.argparsersEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Tuple:
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?' )
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz' )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_ )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )
for dataclass_type in dataclass_types:
UpperCAmelCase : str = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : Optional[Any] = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : Union[str, Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : Tuple = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
UpperCAmelCase : List[Any] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : Optional[Any] = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[str] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCAmelCase : Any = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase : Optional[Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCAmelCase : Dict = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase : List[Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCAmelCase_ ( self : List[str] ) -> int:
@dataclass
class A_ :
'''simple docstring'''
UpperCAmelCase_ : Literal["titi", "toto", 42] = "toto"
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Any = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
UpperCAmelCase : List[Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
UpperCAmelCase : Union[str, Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase : List[str] = HfArgumentParser(lowercase_ )
UpperCAmelCase : Dict = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : int = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase : Optional[Any] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_ )
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message' )
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_ )
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_ )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )
for dataclass_type in dataclass_types:
UpperCAmelCase : Union[str, Any] = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
UpperCAmelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
UpperCAmelCase : List[Any] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
UpperCAmelCase : Tuple = HfArgumentParser(lowercase_ )
UpperCAmelCase : Tuple = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_ )
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
UpperCAmelCase : Union[str, Any] = HfArgumentParser(lowercase_ )
UpperCAmelCase : int = argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_ )
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase : str = HfArgumentParser(lowercase_ )
UpperCAmelCase : str = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
UpperCAmelCase : str = parser.parse_dict(lowercase_ )[0]
UpperCAmelCase : Union[str, Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : Tuple = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[str] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = HfArgumentParser(lowercase_ )
UpperCAmelCase : Optional[int] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Union[str, Any] = os.path.join(lowercase_ , 'temp_json' )
os.mkdir(lowercase_ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(lowercase_ , lowercase_ )
UpperCAmelCase : str = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
UpperCAmelCase : Tuple = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
UpperCAmelCase : List[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Dict = os.path.join(lowercase_ , 'temp_yaml' )
os.mkdir(lowercase_ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
UpperCAmelCase : Any = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
UpperCAmelCase : Dict = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
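# Hedged standalone sketch of the API exercised above; `TrainConfig` is an
# illustrative dataclass, not part of the test suite:
if __name__ == "__main__":
    @dataclass
    class TrainConfig:
        learning_rate: float = 3e-4
        epochs: int = 3
    sketch_parser = HfArgumentParser(TrainConfig )
    (cfg,) = sketch_parser.parse_args_into_dataclasses(['--learning_rate', '1e-3'] )
    assert cfg.learning_rate == 1e-3 and cfg.epochs == 3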
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
        class NewFeatureExtractor ( WavaVecaFeatureExtractor ):
            '''simple docstring'''
            is_local = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
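# Hedged sketch of the happy path these tests cover (assumes transformers is
# installed and the checkpoint can be downloaded):
# from transformers import AutoFeatureExtractor
# extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
# print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor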
| 695 | 0 |